diff --git a/.eslintrc.js b/.eslintrc.js index 31f33737..ad852d2d 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -1,31 +1,9 @@ -const fs = require('fs'); -const graphql = require('graphql'); -const gatsbyESLintConfig = require('gatsby/dist/utils/eslint-config'); -const gatsbyWebpackUtils = require('gatsby/dist/utils/webpack-utils'); - -const graphQLSchema = (() => { - let str; - try { - str = fs.readFileSync('./schema.graphql').toString(); - } catch (err) { - console.log("Warning: File ./schema.graphql does not exist;\n"+ - " ESLint output regarding GraphQL queries may be inaccurate.\n"+ - " Run a Gatsby command to generate the schema file.") - str = "type Query {\n\tempty(filter: Int!): Int\n}\n"; - } - return graphql.buildSchema(str); -})(); -const usingJSXRuntime = gatsbyWebpackUtils.reactHasJsxRuntime(); - -const baseConfig = gatsbyESLintConfig.eslintConfig(graphQLSchema, usingJSXRuntime).baseConfig; - module.exports = { - ...baseConfig, - + globals: { + __PATH_PREFIX__: true, + }, + extends: `react-app`, ignorePatterns: [ "/public/", ], - rules: { - ...baseConfig.rules, - }, }; diff --git a/.github/workflows/blc.yaml b/.github/workflows/blc.yaml index 8181e9e6..46c28dd4 100644 --- a/.github/workflows/blc.yaml +++ b/.github/workflows/blc.yaml @@ -4,16 +4,12 @@ jobs: "Check": runs-on: ubuntu-latest steps: - - uses: actions/setup-node@v2 - with: - node-version: '16' - - uses: actions/setup-python@v2 - with: - python-version: '^3.9' - - uses: actions/checkout@v2 + - uses: actions/setup-node@v4 + - uses: actions/setup-python@v5 + - uses: actions/checkout@v4 with: path: site - - uses: actions/checkout@master + - uses: actions/checkout@v4 with: repository: datawire/getambassador.io-blc2 path: blc diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ba35d542..c48ce33e 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -4,17 +4,13 @@ jobs: "Lint": runs-on: ubuntu-latest steps: - - uses: actions/setup-node@v2 - with: - node-version: '16' - - uses: actions/checkout@v2 + - uses: actions/setup-node@v4 + - uses: actions/checkout@v4 - run: yarn install - name: "yarn run gatsby build" run: | # The sed part of this command removes terminal escape codes. - OVERRIDE_NODE_ENV=development yarn run gatsby build 2> >(tee /dev/stdout | sed $'s/\e[^a-z]*[a-z]//g' > err.log) - - run: | - ! grep ^ err.log + NODE_ENV=development yarn run gatsby build - name: "Dirty check" run: | git add . diff --git a/.node-version b/.node-version new file mode 100644 index 00000000..a81debae --- /dev/null +++ b/.node-version @@ -0,0 +1 @@ +v20.12.2 diff --git a/DEVELOPING.md b/DEVELOPING.md index 4a8789e8..6abff0f5 100644 --- a/DEVELOPING.md +++ b/DEVELOPING.md @@ -1,19 +1,6 @@ # How to hack on this ## Releasing Docs for new Telepresence Versions -If you are only making changes to the `docs/` directory and those changes -aren't ready to go out immediately (for example, if they are for a future -telepresence release), please make those changes in the -[telepresence repository](https://github.com/telepresenceio/telepresence). - -If you are making changes to more than the `docs/` and those changes are -for a future release, merge those changes into a branch `rel/x.y.z`. - -When it is time to do a telepresence release, the docs will be pushed to -the [docs repo](https://github.com/telepresenceio/docs). From there, you can -create (or checkout if it already exists) a `rel/x.y.z` branch on this repo -and run `make pull-docs`. 
Once you merge that PR, the website will update -with the docs for the new release. ## Local development quickstart @@ -29,10 +16,11 @@ Commands of interest: yarn install # Install dependencies in to ./node_modules/ # Development + export NODE_OPTIONS=--openssl-legacy-provider yarn run gatsby develop # Serve a hot-reloading development-build at http://localhost:8000/ yarn run gatsby repl # Run a Node.js REPL in the Gatsby environment yarn run eslint . # Run the linter - make pull-docs # Update ./docs/ from ambassador-docs.git + make pull-docs # Update ./docs/ from telepresenceio/telepresence.git # Production or production-like yarn run gatsby build # Build a production-build, writing it to ./public/ @@ -191,11 +179,10 @@ So we should upgrade Gatsby and associated plugins, and then turn The docs-build machinery in of `package.json`, `gatsby-config.js`, `gatsby-node.js`, `src/assets/` and `src/components/` seem obviously -want be a separate reusable module, so that these things don't need to -be manually kept in-sync between getambassador.io, telepresence.io, -and emissaryingress.io. This seems to be mostly obvious an trival to -do... except for I can't figure how to handle -`src/templates/doc-page.js` to where it's sufficiently +want to be a separate reusable module, so that these things don't need to +be manually kept in-sync between telepresence.io, and telepresence. +This seems to be mostly obvious an trival to do... except for I can't +figure how to handle `src/templates/doc-page.js` to where it's sufficiently parametarized/pluggable, or a way to make it possible to plug in the site-specific one. diff --git a/Makefile b/Makefile index a37ac14d..40bd63c1 100644 --- a/Makefile +++ b/Makefile @@ -1,41 +1,24 @@ -define nl - - -endef - -subtree-preflight: - @if ! grep -q -e 'has_been_added' $$(PATH=$$(git --exec-path):$$PATH which git-subtree 2>/dev/null) /dev/null; then \ - printf '$(RED)Please upgrade your git-subtree:$(END)\n'; \ - printf '$(BLD) sudo curl -fL https://raw.githubusercontent.com/LukeShu/git/lukeshu/next/2021-05-15/contrib/subtree/git-subtree.sh -o $$(git --exec-path)/git-subtree && sudo chmod 755 $$(git --exec-path)/git-subtree$(END)\n'; \ - false; \ - else \ - printf '$(GRN)git-subtree OK$(END)\n'; \ +# Ensure that the telepresence remote is up-to-date. +telepresence-remote: + @if [ "$$(git remote | grep -E '^telepresence$$')" = 'telepresence' ]; then\ + git remote update telepresence;\ + else\ + git remote add -f telepresence git@github.com:telepresenceio/telepresence.git;\ fi - git gc -.PHONY: subtree-preflight +.PHONY: telepresence-remote -PULL_PREFIX ?= -PUSH_PREFIX ?= $(USER)/from-telepresence.io-$(shell date +%Y-%m-%d)/ +# MATCH_TAGS is the regexp matching the tags that we expect will have docs. +MATCH_TAGS ?= ^v2\.[2-9][0-9]+\.[0-9]+(-(rc|test)\.[0-9]+)$$ -dir2branch = $(patsubst docs/%,release/%,$(subst pre-release,v2,$1)) +# EXCLUDE_TAGS is used when we want to exclude some of the matching tags from the telepresence repository +EXCLUDE_TAGS ?= -# Used when syncing from telepresenceio since that repo doesn't -# have docs for v1. -EXCLUDE_DIR ?= "" -pull-docs: ## Update ./docs from https://github.com/telepresenceio/docs -pull-docs: subtree-preflight - $(foreach subdir,$(shell find docs -mindepth 1 -maxdepth 1 -type d -not -name $(EXCLUDE_DIR)|sort -V),\ - git subtree pull --squash --prefix=$(subdir) https://github.com/telepresenceio/docs $(PULL_PREFIX)$(call dir2branch,$(subdir))$(nl)) +# Update the docs at docs/v. from the found tags. 
+pull-docs: + $(foreach release,$(shell git tag -l | grep -E '$(MATCH_TAGS)' | (test -n '$(EXCLUDE_TAGS)' && grep -vE '$(EXCLUDE_TAGS)' || cat) | sort -V),\ + dir=$$(expr '$(release)' : '\(v[0-9]\.[0-9][0-9]*\)');\ + echo $$dir;\ + rm -rf docs/$$dir;\ + git add docs;\ + git read-tree --prefix docs/$$dir -u $(release):docs) .PHONY: pull-docs - -PUSH_BRANCH ?= $(USER)/from-telepresence.io-$(shell date +%Y-%m-%d) -push-docs: ## Publish ./ambassador to https://github.com/telepresenceio/docs -push-docs: subtree-preflight - @PS4=; set -x; { \ - git remote add --no-tags remote-docs https://github.com/telepresenceio/docs && \ - git remote set-url --push remote-docs git@github.com:telepresenceio/docs && \ - :; } || true - git fetch --prune remote-docs - $(foreach subdir,$(shell find docs -mindepth 1 -maxdepth 1 -type d|sort -V),\ - git subtree push --rejoin --squash --prefix=$(subdir) remote-docs $(PUSH_PREFIX)$(call dir2branch,$(subdir))$(nl)) -.PHONY: push-docs diff --git a/bin/serve.js b/bin/serve.js index 29a8bf9c..d7200c31 100755 --- a/bin/serve.js +++ b/bin/serve.js @@ -42,7 +42,7 @@ function matchesRedirect(forcefulOnly, requestURL, redirect) { function doRedirect(requestURL, response, redirect) { let location = redirect.to; - if (!url.parse(location).search) { + if (!new URL(location).search) { location += (requestURL.search||''); } response.writeHead(redirect.status, { @@ -54,8 +54,8 @@ function doRedirect(requestURL, response, redirect) { server.on('request', async (request, response) => { console.log(request.method, request.url); - const requestURL = url.parse(url.resolve('/', request.url)); - if (requestURL.protocol || requestURL.slashes || requestURL.host) { + const requestURL = new URL(url.resolve('/', request.url)); + if (requestURL.protocol || requestURL.host) { response.writeHead(400); response.end('Bad request URL'); } diff --git a/docs-config.js b/docs-config.js index de83ea6c..f13bd7fa 100644 --- a/docs-config.js +++ b/docs-config.js @@ -16,8 +16,7 @@ module.exports = { return '/docs/' + (node.relativePath .replaceAll(path.sep, path.posix.sep) .replace(/\/index\.md$/, '/') - .replace(/\.md$/, '/') - .replace(/releaseNotes\.yml$/, 'release-notes/')); + .replace(/\.md$/, '/')); }, docrootURL: function(node) { @@ -40,21 +39,15 @@ module.exports = { }, canonicalURL: function(node) { - const urlpath = this.urlpath(node); - const version = urlpath.split(path.posix.sep)[2]; - if (version === "v1") { - // v1 docs aren't on getambassador.io - return urlpath; - } - const relpath = urlpath.split(path.posix.sep).slice(3).join(path.posix.sep); + const relpath = this.urlpath(node).split(path.posix.sep).slice(3).join(path.posix.sep); return `https://www.getambassador.io/docs/telepresence/latest/${relpath}`; }, githubURL: function(node) { const gitpath = 'docs/' + (node.relativePath - .replace(/^latest/,'v2.5') + .replace(/^latest\//,'') .replaceAll(path.sep, path.posix.sep)); - return `https://github.com/telepresenceio/telepresence.io/blob/master/${gitpath}`; + return `https://github.com/telepresenceio/telepresence/blob/release/v2/${gitpath}`; }, // Don't show reading time for Telepresence. 
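Note: a minimal sketch (not part of this diff) of what the updated docs-config.js helpers above return, assuming a hypothetical doc node; the script name and relativePath value are made up for illustration. With the v1 special case removed, every page's canonical URL now points at the getambassador.io telepresence/latest docs.

    // check-docs-urls.js (hypothetical helper, run from the repo root)
    const docsConfig = require('./docs-config.js');
    const node = { relativePath: 'v2.14/howtos/intercepts.md' }; // hypothetical docs page
    console.log(docsConfig.urlpath(node));      // -> /docs/v2.14/howtos/intercepts/
    console.log(docsConfig.canonicalURL(node)); // -> https://www.getambassador.io/docs/telepresence/latest/howtos/intercepts/
    console.log(docsConfig.githubURL(node));    // -> https://github.com/telepresenceio/telepresence/blob/release/v2/docs/v2.14/howtos/intercepts.md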
diff --git a/docs/2.14/doc-links.yml b/docs/2.14/doc-links.yml deleted file mode 100644 index ecc9da4f..00000000 --- a/docs/2.14/doc-links.yml +++ /dev/null @@ -1,83 +0,0 @@ -- title: Quick start - link: quick-start -- title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Install Traffic Manager - link: install/manager/ - - title: Install Traffic Manager with Helm - link: install/helm/ - - title: Cloud Provider Prerequisites - link: install/cloud/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ -- title: Core concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: "Making the remote local: Faster feedback, collaboration and debugging" - link: concepts/faster - - title: Types of intercepts - link: concepts/intercepts -- title: How do I... - items: - - title: Intercept a service in your own environment - link: howtos/intercepts - - title: Proxy outbound traffic to my cluster - link: howtos/outbound - - title: Host a cluster in a local VM - link: howtos/cluster-in-vm -- title: Technical reference - items: - - title: Architecture - link: reference/architecture - - title: Client reference - link: reference/client - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Using Docker for intercepts - link: reference/docker-run - - title: Running Telepresence in a Docker container - link: reference/inside-container - - title: Environment variables - link: reference/environment - - title: Intercepts - link: reference/intercepts/ - items: - - title: Configure intercept using CLI - link: reference/intercepts/cli - - title: Manually injecting the Traffic Agent - link: reference/intercepts/manual-agent - - title: Volume mounts - link: reference/volume - - title: RESTful API service - link: reference/restapi - - title: DNS resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: Telepresence and VPNs - link: reference/vpn - - title: Networking through Virtual Network Interface - link: reference/tun-device - - title: Connection Routing - link: reference/routing - - title: Using Telepresence with Linkerd - link: reference/linkerd -- title: FAQs - link: faqs -- title: Troubleshooting - link: troubleshooting -- title: Community - link: community -- title: Release Notes - link: release-notes -- title: Licenses - link: licenses \ No newline at end of file diff --git a/docs/2.14/quick-start/TelepresenceQuickStartLanding.js b/docs/2.14/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 9395b9cb..00000000 --- a/docs/2.14/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,61 +0,0 @@ -import React from 'react'; -import Icon from '../../../../../src/components/Icon'; -import Link from '../../../../../src/components/Link'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -const TelepresenceQuickStartLanding = () => { - - return ( -
      [JSX markup lost in extraction; the recoverable landing-page copy follows]
      Telepresence OSS
      Set up your ideal development environment for Kubernetes in seconds.
      Accelerate your inner development loop with hot reload using your
      existing IDE, and workflow.
      Install Telepresence and connect to your Kubernetes workloads.
      Get Started
      What Can Telepresence Do for You?
      Telepresence gives Kubernetes application developers:
      • Make changes on the fly and see them reflected when interacting with your remote Kubernetes environment, this is just like hot reloading, but it works across both local and remote environments.
      • Query services and microservice APIs that are only accessible in your remote cluster's network.
      • Set breakpoints in your IDE and re-route remote traffic to your local machine to investigate bugs with realistic user traffic and API calls.
      LEARN MORE
-  );
-};
-
-export default TelepresenceQuickStartLanding;
diff --git a/docs/2.14/quick-start/qs-cards.js b/docs/2.14/quick-start/qs-cards.js
deleted file mode 100644
index 5b68aa4a..00000000
--- a/docs/2.14/quick-start/qs-cards.js
+++ /dev/null
@@ -1,71 +0,0 @@
-import Grid from '@material-ui/core/Grid';
-import Paper from '@material-ui/core/Paper';
-import Typography from '@material-ui/core/Typography';
-import { makeStyles } from '@material-ui/core/styles';
-import { Link as GatsbyLink } from 'gatsby';
-import React from 'react';
-
-const useStyles = makeStyles((theme) => ({
-  root: {
-    flexGrow: 1,
-    textAlign: 'center',
-    alignItem: 'stretch',
-    padding: 0,
-  },
-  paper: {
-    padding: theme.spacing(1),
-    textAlign: 'center',
-    color: 'black',
-    height: '100%',
-  },
-}));
-
-export default function CenteredGrid() {
-  const classes = useStyles();
-
-  return (
      [JSX markup lost in extraction; the recoverable card copy follows]
      Collaborating: Use preview URLS to collaborate with your colleagues and others outside of your organization.
      Outbound Sessions: While connected to the cluster, your laptop can interact with services as if it was another pod in the cluster.
      FAQs: Learn more about uses cases and the technical implementation of Telepresence.
- ); -} diff --git a/docs/2.14/quick-start/telepresence-quickstart-landing.less b/docs/2.14/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index e2a83df4..00000000 --- a/docs/2.14/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,152 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.doc-body .telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: -8.4px auto 48px; - max-width: 1050px; - min-width: @docs-min-width; - width: 100%; - - h1 { - color: @blue-dark; - font-weight: normal; - letter-spacing: 0.25px; - font-size: 33px; - margin: 0 0 15px; - } - p { - font-size: 0.875rem; - line-height: 24px; - margin: 0; - padding: 0; - } - - .demo-cluster-container { - display: grid; - margin: 40px 0; - grid-template-columns: 1fr; - grid-template-columns: 1fr; - @media screen and (max-width: 900px) { - grid-template-columns: repeat(1, 1fr); - } - } - .main-title-container { - display: flex; - flex-direction: column; - align-items: center; - p { - text-align: center; - font-size: 0.875rem; - } - } - h2 { - font-size: 23px; - color: @black; - margin: 0 0 20px 0; - padding: 0; - &.underlined { - padding-bottom: 2px; - border-bottom: 3px solid @grey-separator; - text-align: center; - } - strong { - font-weight: 800; - } - &.subtitle { - margin-bottom: 10px; - font-size: 19px; - line-height: 28px; - } - } - .learn-more, - .get-started { - font-size: 14px; - font-weight: 600; - letter-spacing: 1.25px; - display: flex; - align-items: center; - text-decoration: none; - &.inline { - display: inline-block; - text-decoration: underline; - font-size: unset; - font-weight: normal; - &:hover { - text-decoration: none; - } - } - &.blue { - color: @blue-5; - } - &.blue:hover { - color: @blue-dark; - } - } - - .learn-more { - margin-top: 20px; - padding: 13px 0; - } - - .box-container { - &.border { - border: 1.5px solid @grey-separator; - border-radius: 5px; - padding: 10px; - } - &::before { - content: ''; - position: absolute; - width: 14px; - height: 14px; - border-radius: 50%; - top: 0; - left: 50%; - transform: translate(-50%, -50%); - } - p { - font-size: 0.875rem; - line-height: 24px; - padding: 0; - } - } - - .telepresence-video { - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 18px; - h2.telepresence-video-title { - font-weight: 400; - font-size: 23px; - line-height: 33px; - color: @blue-6; - } - } - - .video-section { - display: grid; - grid-template-columns: 1fr 1fr; - column-gap: 20px; - @media screen and (max-width: 800px) { - grid-template-columns: 1fr; - } - ul { - font-size: 14px; - margin: 0 10px 6px 0; - } - .video-container { - position: relative; - padding-bottom: 56.25%; // 16:9 aspect ratio - height: 0; - iframe { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - } - } - } -} diff --git a/docs/2.14/releaseNotes.yml b/docs/2.14/releaseNotes.yml deleted file mode 100644 index 49526802..00000000 --- a/docs/2.14/releaseNotes.yml +++ /dev/null @@ -1,2270 +0,0 @@ -# This file should be placed in the folder for the version of the -# product that's meant to be documented. A `/release-notes` page will -# be automatically generated and populated at build time. -# -# Note that an entry needs to be added to the `doc-links.yml` file in -# order to surface the release notes in the table of contents. -# -# The YAML in this file should contain: -# -# changelog: An (optional) URL to the CHANGELOG for the product. 
-# items: An array of releases with the following attributes: -# - version: The (optional) version number of the release, if applicable. -# - date: The date of the release in the format YYYY-MM-DD. -# - notes: An array of noteworthy changes included in the release, each having the following attributes: -# - type: The type of change, one of `bugfix`, `feature`, `security` or `change`. -# - title: A short title of the noteworthy change. -# - body: >- -# Two or three sentences describing the change and why it -# is noteworthy. This is HTML, not plain text or -# markdown. It is handy to use YAML's ">-" feature to -# allow line-wrapping. -# - image: >- -# The URL of an image that visually represents the -# noteworthy change. This path is relative to the -# `release-notes` directory; if this file is -# `FOO/releaseNotes.yml`, then the image paths are -# relative to `FOO/release-notes/`. -# - docs: The path to the documentation page where additional information can be found. -# - href: A path from the root to a resource on the getambassador website, takes precedence over a docs link. - -docTitle: Telepresence Release Notes -docDescription: >- - Release notes for Telepresence by Ambassador Labs, a CNCF project - that enables developers to iterate rapidly on Kubernetes - microservices by arming them with infinite-scale development - environments, access to instantaneous feedback loops, and highly - customizable development environments. - -changelog: https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md - -items: - - version: 2.14.0 - date: "2023-06-12" - notes: - - type: feature - title: DNS configuration now supports excludes and mappings. - body: >- - The DNS configuration now supports two new fields, excludes and mappings. The excludes field allows you to - exclude a given list of hostnames from resolution, while the mappings field can be used to resolve a hostname with - another. - docs: https://github.com/telepresenceio/telepresence/pull/3172 - - - type: feature - title: Added the ability to exclude environment variables - body: >- - Added a new config map that can take an array of environment variables that will - then be excluded from an intercept that retrieves the environment of a pod. - - - type: bugfix - title: Fixed traffic-agent backward incompatibility issue causing lack of remote mounts - body: >- - A traffic-agent of version 2.13.3 (or 1.13.15) would not propagate the directories under - /var/run/secrets when used with a traffic manager older than 2.13.3. - - - type: bugfix - title: Fixed race condition causing segfaults on rare occasions when a tunnel stream timed out. - body: >- - A context cancellation could sometimes be trapped in a stream reader, causing it to incorrectly return - an undefined message which in turn caused the parent reader to panic on a nil pointer reference. - docs: https://github.com/telepresenceio/telepresence/pull/2963 - - - type: change - title: Routing conflict reporting. - body: >- - Telepresence will now attempt to detect and report routing conflicts with other running VPN software on client machines. - There is a new configuration flag that can be tweaked to allow certain CIDRs to be overridden by Telepresence. - - - type: change - title: test-vpn command deprecated - body: >- - Running telepresence test-vpn will now print a deprecation warning and exit. The command will be removed in a future release. - Instead, please configure telepresence for your VPN's routes. 
- - version: 2.13.3 - date: "2023-05-25" - notes: - - type: feature - title: Add imagePullSecrets to hooks - body: >- - Add .Values.hooks.curl.imagePullSecrets and .Values.hooks curl.imagePullSecrets to Helm values. - docs: https://github.com/telepresenceio/telepresence/pull/3079 - - - type: change - title: Change reinvocation policy to Never for the mutating webhook - body: >- - The default setting of the reinvocationPolicy for the mutating webhook dealing with agent injections changed from Never to IfNeeded. - - - type: bugfix - title: Fix mounting fail of IAM roles for service accounts web identity token - body: >- - The eks.amazonaws.com/serviceaccount volume injected by EKS is now exported and remotely mounted during an intercept. - docs: https://github.com/telepresenceio/telepresence/issues/3166 - - - type: bugfix - title: Correct namespace selector for cluster versions with non-numeric characters - body: >- - The mutating webhook now correctly applies the namespace selector even if the cluster version contains non-numeric characters. For example, it can now handle versions such as Major:"1", Minor:"22+". - docs: https://github.com/telepresenceio/telepresence/pull/3184 - - - type: bugfix - title: Enable IPv6 on the telepresence docker network - body: >- - The "telepresence" Docker network will now propagate DNS AAAA queries to the Telepresence DNS resolver when it runs in a Docker container. - docs: https://github.com/telepresenceio/telepresence/issues/3179 - - - type: bugfix - title: Fix the crash when intercepting with --local-only and --docker-run - body: >- - Running telepresence intercept --local-only --docker-run no longer results in a panic. - docs: https://github.com/telepresenceio/telepresence/issues/3171 - - - type: bugfix - title: Fix incorrect error message with local-only mounts - body: >- - Running telepresence intercept --local-only --mount false no longer results in an incorrect error message saying "a local-only intercept cannot have mounts". - docs: https://github.com/telepresenceio/telepresence/issues/3171 - - - type: bugfix - title: specify port in hook urls - body: >- - The helm chart now correctly handles custom agentInjector.webhook.port that was not being set in hook URLs. - docs: https://github.com/telepresenceio/telepresence/pull/3161 - - - type: bugfix - title: Fix wrong default value for disableGlobal and agentArrival - body: >- - Params .intercept.disableGlobal and .timeouts.agentArrival are now correctly honored. - - - version: 2.13.2 - date: "2023-05-12" - notes: - - type: bugfix - title: Authenticator Service Update - body: >- - Replaced / characters with a - when the authenticator service creates the kubeconfig in the Telepresence cache. - docs: https://github.com/telepresenceio/telepresence/pull/3167 - - - type: bugfix - title: Enhanced DNS Search Path Configuration for Windows (Auto, PowerShell, and Registry Options) - body: >- - Configurable strategy (auto, powershell. or registry) to set the global DNS search path on Windows. Default is auto which means try powershell first, and if it fails, fall back to registry. - docs: https://github.com/telepresenceio/telepresence/pull/3154 - - - type: feature - title: Configurable Traffic Manager Timeout in values.yaml - body: >- - The timeout for the traffic manager to wait for traffic agent to arrive can now be configured in the values.yaml file using timeouts.agentArrival. The default timeout is still 30 seconds. 
- docs: https://github.com/telepresenceio/telepresence/pull/3148 - - - type: bugfix - title: Enhanced Local Cluster Discovery for macOS and Windows - body: >- - The automatic discovery of a local container based cluster (minikube or kind) used when the Telepresence daemon runs in a container, now works on macOS and Windows, and with different profiles, ports, and cluster names - docs: https://github.com/telepresenceio/telepresence/pull/3165 - - - type: bugfix - title: FTP Stability Improvements - body: >- - Multiple simultaneous intercepts can transfer large files in bidirectionally and in parallel. - docs: https://github.com/telepresenceio/telepresence/pull/3157 - - - type: bugfix - title: Intercepted Persistent Volume Pods No Longer Cause Timeouts - body: >- - Pods using persistent volumes no longer causes timeouts when intercepted. - docs: https://github.com/telepresenceio/telepresence/pull/3157 - - - type: bugfix - title: Successful 'Telepresence Connect' Regardless of DNS Configuration - body: >- - Ensure that `telepresence connect`` succeeds even though DNS isn't configured correctly. - docs: https://github.com/telepresenceio/telepresence/pull/3154 - - - type: bugfix - title: Traffic-Manager's 'Close of Closed Channel' Panic Issue - body: >- - The traffic-manager would sometimes panic with a "close of closed channel" message and exit. - docs: https://github.com/telepresenceio/telepresence/pull/3160 - - - type: bugfix - title: Traffic-Manager's Type Cast Panic Issue - body: >- - The traffic-manager would sometimes panic and exit after some time due to a type cast panic. - docs: https://github.com/telepresenceio/telepresence/pull/3153 - - - type: bugfix - title: Login Friction - body: >- - Improve login behavior by clearing the saved intermediary API Keys when a user logins to force Telepresence to generate new ones. - - - version: 2.13.1 - date: "2023-04-20" - notes: - - type: change - title: Update ambassador-telepresence-agent to version 1.13.13 - body: >- - The malfunction of the Ambassador Telepresence Agent occurred as a result of an update which compressed the executable file. - - - version: 2.13.0 - date: "2023-04-18" - notes: - - type: feature - title: Better kind / minikube network integration with docker - body: >- - The Docker network used by a Kind or Minikube (using the "docker" driver) installation, is automatically detected and connected to a Docker container running the Telepresence daemon. - docs: https://github.com/telepresenceio/telepresence/pull/3104 - - - type: feature - title: New mapped namespace output - body: >- - Mapped namespaces are included in the output of the telepresence status command. - - - type: feature - title: Setting of the target IP of the intercept - docs: reference/intercepts/cli - body: >- - There's a new --address flag to the intercept command allowing users to set the target IP of the intercept. - - - type: feature - title: Multi-tenant support - body: >- - The client will no longer need cluster wide permissions when connected to a namespace scoped Traffic Manager. - - - type: bugfix - title: Cluster domain resolution bugfix - body: >- - The Traffic Manager now uses a fail-proof way to determine the cluster domain. - docs: https://github.com/telepresenceio/telepresence/issues/3114 - - - type: bugfix - title: Windows DNS - body: >- - DNS on windows is more reliable and performant. 
- docs: https://github.com/telepresenceio/telepresence/issues/2939 - - - type: bugfix - title: Agent injection with huge amount of deployments - body: >- - The agent is now correctly injected even with a high number of deployment starting at the same time. - docs: https://github.com/telepresenceio/telepresence/issues/3025 - - - type: bugfix - title: Self-contained kubeconfig with Docker - body: >- - The kubeconfig is made self-contained before running Telepresence daemon in a Docker container. - docs: https://github.com/telepresenceio/telepresence/issues/3099 - - - type: bugfix - title: Version command error - body: >- - The version command won't throw an error anymore if there is no kubeconfig file defined. - docs: https://github.com/telepresenceio/telepresence/issues/3095 - - - version: 2.12.2 - date: "2023-04-04" - notes: - - type: security - title: Update Golang build version to 1.20.3 - body: >- - Update Golang to 1.20.3 to address CVE-2023-24534, CVE-2023-24536, CVE-2023-24537, and CVE-2023-24538 - - version: 2.12.1 - date: "2023-03-22" - notes: - - type: feature - title: Additions to gather-logs - body: >- - Telepresence now includes the kubeauth logs when running - the gather-logs command - - type: bugfix - title: Environment Variables are now propagated to kubeauth - body: >- - Telepresence now propagates environment variables properly - to the kubeauth-foreground to be used with cluster authentication - - version: 2.12.0 - date: "2023-03-20" - notes: - - type: feature - title: Check for service connectivity independently from pod connectivity - body: >- - Telepresence now enables you to check for a service and pod's connectivity independently, so that it can proxy one without proxying the other. - docs: https://github.com/telepresenceio/telepresence/issues/2911 - - type: bugfix - title: Fix cluster authentication when running the telepresence daemon in a docker container. - body: >- - Authentication to EKS and GKE clusters have been fixed (k8s >= v1.26) - docs: https://github.com/telepresenceio/telepresence/pull/3055 - - type: bugfix - body: >- - Telepresence will not longer panic when a CNAME does not contain the .svc in it - title: Fix panic when CNAME of kubernetes.default doesn't contain .svc - docs: https://github.com/telepresenceio/telepresence/issues/3015 - - version: 2.11.1 - date: "2023-02-27" - notes: - - type: bugfix - title: Multiple architectures - docs: https://github.com/telepresenceio/telepresence/issues/3043 - body: >- - The multi-arch build for the ambassador-telepresence-manager and ambassador-telepresence-agent now - works for both amd64 and arm64. - - type: bugfix - title: Ambassador agent Helm chart duplicates - docs: https://github.com/telepresenceio/telepresence/issues/3046 - body: >- - Some labels in the Helm chart for the Ambassador Agent were duplicated, causing problems for FluxCD. - - version: 2.11.0 - date: "2023-02-22" - notes: - - type: feature - title: Support for arm64 (Apple Silicon) - body: >- - The ambassador-telepresence-manager and ambassador-telepresence-agent are now distributed as - multi-architecture images and can run natively on both linux/amd64 and linux/arm64. - - type: bugfix - title: Connectivity check can break routing in VPN setups - docs: https://github.com/telepresenceio/telepresence/issues/3006 - body: >- - The connectivity check failed to recognize that the connected peer wasn't a traffic-manager. 
Consequently, - it didn't proxy the cluster because it incorrectly assumed that a successful connect meant cluster connectivity, - - type: bugfix - title: VPN routes not detected by telepresence test-vpn on macOS - docs: https://github.com/telepresenceio/telepresence/pull/3038 - body: >- - The telepresence test-vpn did not include routes of type link when checking for subnet - conflicts. - - version: 2.10.5 - date: "2023-02-06" - notes: - - type: bugfix - title: Daemon reconnection fix - body: >- - Fixed a bug that prevented the local daemons from automatically reconnecting to the traffic manager when the network connection was lost. - - version: 2.10.4 - date: "2023-01-20" - notes: - - type: bugfix - title: Backward compatibility restored - body: >- - Telepresence can now create intercepts with traffic-managers of version 2.9.5 and older. - - version: 2.10.2 - date: "2023-01-16" - notes: - - type: bugfix - title: version consistency in helm commands - body: >- - Ensure that CLI and user-daemon binaries are the same version when running telepresence helm install - or telepresence helm upgrade. - docs: https://github.com/telepresenceio/telepresence/pull/2975 - - type: bugfix - title: Release Process - body: >- - Fixed an issue that prevented the --use-saved-intercept flag from working. - - version: 2.10.1 - date: "2023-01-11" - notes: - - type: bugfix - title: Release Process - body: >- - Fixed a regex in our release process that prevented 2.10.0 promotion. - - version: 2.10.0 - date: "2023-01-11" - notes: - - type: feature - title: Added `insert` and `upgrade` Subcommands to `telepresence helm` - body: >- - The `telepresence helm` sub-commands `insert` and `upgrade` now accepts all types of helm `--set-XXX` flags. - - type: feature - title: Added Image Pull Secrets to Helm Chart - body: >- - Image pull secrets for the traffic-agent can now be added using the Helm chart setting `agent.image.pullSecrets`. - - type: change - title: Rename Configmap - body: >- - The configmap `traffic-manager-clients` has been renamed to `traffic-manager`. - - type: change - title: Webhook Namespace Field - body: >- - If the cluster is Kubernetes 1.21 or later, the mutating webhook will find the correct namespace using the label `kubernetes.io/metadata.name` rather than `app.kuberenetes.io/name`. - docs: https://github.com/telepresenceio/telepresence/issues/2913 - - type: change - title: Rename Webhook - body: >- - The name of the mutating webhook now contains the namespace of the traffic-manager so that the webhook is easier to identify when there are multiple namespace scoped telepresence installations in the cluster. - - type: change - title: OSS Binaries - body: >- - The OSS Helm chart is no longer pushed to the datawire Helm repository. It will instead be pushed from the telepresence proprietary repository. The OSS Helm chart is still what's embedded in the OSS telepresence client. - docs: https://github.com/telepresenceio/telepresence/pull/2943 - - type: bugfix - title: Fix Panic Using `--docker-run` - body: >- - Telepresence no longer panics when `--docker-run` is combined with `--name ` instead of `--name=`. - docs: https://github.com/telepresenceio/telepresence/issues/2953 - - type: bugfix - title: Stop assuming cluster domain - body: >- - Telepresence traffic-manager extracts the cluster domain (e.g. "cluster.local") using a CNAME lookup for "kubernetes.default" instead of "kubernetes.default.svc". 
- docs: https://github.com/telepresenceio/telepresence/pull/2959 - - type: bugfix - title: Uninstall hook timeout - body: >- - A timeout was added to the pre-delete hook `uninstall-agents`, so that a helm uninstall doesn't hang when there is no running traffic-manager. - docs: https://github.com/telepresenceio/telepresence/pull/2937 - - type: bugfix - title: Uninstall hook check - body: >- - The `Helm.Revision` is now used to prevent that Helm hook calls are served by the wrong revision of the traffic-manager. - docs: https://github.com/telepresenceio/telepresence/issues/2954 - - version: 2.9.5 - date: "2022-12-08" - notes: - - type: security - title: Update to golang v1.19.4 - body: >- - Apply security updates by updating to golang v1.19.4 - docs: https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU - - type: bugfix - title: GCE authentication - body: >- - Fixed a regression, that was introduced in 2.9.3, preventing use of gce authentication without also having a config element present in the gce configuration in the kubeconfig. - - version: 2.9.4 - date: "2022-12-02" - notes: - - type: feature - title: Subnet detection strategy - body: >- - The traffic-manager can automatically detect that the node subnets are different from the pod subnets, and switch detection strategy to instead use subnets that cover the pod IPs. - - type: bugfix - title: Fix `--set` flag for `telepresence helm install` - body: >- - The `telepresence helm` command `--set x=y` flag didn't correctly set values of other types than `string`. The code now uses standard Helm semantics for this flag. - - type: bugfix - title: Fix `agent.image` setting propigation - body: >- - Telepresence now uses the correct `agent.image` properties in the Helm chart when copying agent image settings from the `config.yml` file. - - type: bugfix - title: Delay file sharing until needed - body: >- - Initialization of FTP type file sharing is delayed, so that setting it using the Helm chart value `intercept.useFtp=true` works as expected. - - type: bugfix - title: Cleanup on `telepresence quit` - body: >- - The port-forward that is created when Telepresence connects to a cluster is now properly closed when `telepresence quit` is called. - - type: bugfix - title: Watch `config.yml` without panic - body: >- - The user daemon no longer panics when the `config.yml` is modified at a time when the user daemon is running but no session is active. - - type: bugfix - title: Thread safety - body: >- - Fix race condition that would occur when `telepresence connect` `telepresence leave` was called several times in rapid succession. - - version: 2.9.3 - date: "2022-11-23" - notes: - - type: feature - title: Helm options for `livenessProbe` and `readinessProbe` - body: >- - The helm chart now supports `livenessProbe` and `readinessProbe` for the traffic-manager deployment, so that the pod automatically restarts if it doesn't respond. - - type: change - title: Improved network communication - body: >- - The root daemon now communicates directly with the traffic-manager instead of routing all outbound traffic through the user daemon. - - type: bugfix - title: Root daemon debug logging - body: >- - Using `telepresence loglevel LEVEL` now also sets the log level in the root daemon. - - type: bugfix - title: Multivalue flag value propagation - body: >- - Multi valued kubernetes flags such as `--as-group` are now propagated correctly. 
- - type: bugfix - title: Root daemon stability - body: >- - The root daemon would sometimes hang indefinitely when quit and connect were called in rapid succession. - - type: bugfix - title: Base DNS resolver - body: >- - Don't use `systemd.resolved` base DNS resolver unless cluster is proxied. - - version: 2.9.2 - date: "2022-11-16" - notes: - - type: bugfix - title: Fix panic - body: >- - Fix panic when connecting to an older traffic-manager. - - type: bugfix - title: Fix header flag - body: >- - Fix an issue where the `http-header` flag sometimes wouldn't propagate correctly. - - version: 2.9.1 - date: "2022-11-16" - notes: - - type: bugfix - title: Connect failures due to missing auth provider. - body: >- - The regression in 2.9.0 that caused a `no Auth Provider found for name “gcp”` error when connecting was fixed. - - version: 2.9.0 - date: "2022-11-15" - notes: - - type: feature - title: New command to view client configuration. - body: >- - A new telepresence config view was added to make it easy to view the current - client configuration. - docs: new-in-2.9#view-the-client-configuration - - type: feature - title: Configure Clients using the Helm chart. - body: >- - The traffic-manager can now configure all clients that connect through the client: map in - the values.yaml file. - docs: reference/cluster-config#client-configuration - - type: feature - title: The Traffic manager version is more visible. - body: >- - The command telepresence version will now include the version of the traffic manager when - the client is connected to a cluster. - - type: feature - title: Command output in YAML format. - body: >- - The global --output flag now accepts both yaml and json. - docs: new-in-2.9#yaml-output - - type: change - title: Deprecated status command flag - body: >- - The telepresence status --json flag is deprecated. Use telepresence status --output=json instead. - - type: bugfix - title: Unqualified service name resolution in docker. - body: >- - Unqualified service names now resolves OK from the docker container when using telepresence intercept --docker-run. - docs: https://github.com/telepresenceio/telepresence/issues/2870 - - type: bugfix - title: Output no longer mixes plaintext and json. - body: >- - Informational messages that don't really originate from the command, such as "Launching Telepresence Root Daemon", - or "An update of telepresence ...", are discarded instead of being printed as plain text before the actual formatted - output when using the --output=json. - docs: https://github.com/telepresenceio/telepresence/issues/2854 - - type: bugfix - title: No more panic when invalid port names are detected. - body: >- - A `telepresence intercept` of services with invalid port no longer causes a panic. - docs: https://github.com/telepresenceio/telepresence/issues/2880 - - type: bugfix - title: Proper errors for bad output formats. - body: >- - An attempt to use an invalid value for the global --output flag now renders a proper error message. - - type: bugfix - title: Remove lingering DNS config on macOS. - body: >- - Files lingering under /etc/resolver as a result of ungraceful shutdown of the root daemon on macOS, are - now removed when a new root daemon starts. - - version: 2.8.5 - date: "2022-11-2" - notes: - - type: security - title: CVE-2022-41716 - body: >- - Updated Golang to 1.19.3 to address CVE-2022-41716. - - version: 2.8.4 - date: "2022-11-2" - notes: - - type: bugfix - title: Release Process - body: >- - This release resulted in changes to our release process. 
- - version: 2.8.3 - date: "2022-10-27" - notes: - - type: feature - title: Ability to disable global intercepts. - body: >- - Global intercepts (a.k.a. TCP intercepts) can now be disabled by using the new Helm chart setting intercept.disableGlobal. - docs: https://github.com/telepresenceio/telepresence/issues/2140 - - type: feature - title: Configurable mutating webhook port - body: >- - The port used for the mutating webhook can be configured using the Helm chart setting - agentInjector.webhook.port. - docs: install/helm - - type: change - title: Mutating webhook port defaults to 443 - body: >- - The default port for the mutating webhook is now 443. It used to be 8443. - - type: change - title: Agent image configuration mandatory in air-gapped environments. - body: >- - The traffic-manager will no longer default to use the tel2 image for the traffic-agent when it is - unable to connect to Ambassador Cloud. Air-gapped environments must declare what image to use in the Helm chart. - - type: bugfix - title: Can now connect to non-helm installs - body: >- - telepresence connect now works as long as the traffic manager is installed, even if - it wasn't installed via >code>helm install - docs: https://github.com/telepresenceio/telepresence/issues/2824 - - type: bugfix - title: check-vpn crash fixed - body: >- - telepresence check-vpn no longer crashes when the daemons don't start properly. - - version: 2.8.2 - date: "2022-10-15" - notes: - - type: bugfix - title: Reinstate 2.8.0 - body: >- - There was an issue downloading the free enhanced client. This problem was fixed, 2.8.0 was reinstated - - version: 2.8.1 - date: "2022-10-14" - notes: - - type: bugfix - title: Rollback 2.8.0 - body: >- - Rollback 2.8.0 while we investigate an issue with ambassador cloud. - - version: 2.8.0 - date: "2022-10-14" - notes: - - type: feature - title: Improved DNS resolver - body: >- - The Telepresence DNS resolver is now capable of resolving queries of type A, AAAA, CNAME, - MX, NS, PTR, SRV, and TXT. - docs: reference/dns - - type: feature - title: New `client` structure in Helm chart - body: >- - A new client struct was added to the Helm chart. It contains a connectionTTL that controls - how long the traffic manager will retain a client connection without seeing any sign of life from the client. - docs: reference/cluster-config#Client-Configuration - - type: feature - title: Include and exclude suffixes configurable using the Helm chart. - body: >- - A dns element was added to the client struct in Helm chart. It contains an includeSuffixes and - an excludeSuffixes value that controls what type of names that the DNS resolver in the client will delegate to - the cluster. - docs: reference/cluster-config#DNS - - type: feature - title: Configurable traffic-manager API port - body: >- - The API port used by the traffic-manager is now configurable using the Helm chart value apiPort. - The default port is 8081. - docs: https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence - - type: feature - title: Envoy server and admin port configuration. - body: >- - An new agent struct was added to the Helm chart. It contains an `envoy` structure where the server and - admin port of the Envoy proxy running in the enhanced traffic-agent can be configured. - docs: reference/cluster-config#Envoy-Configuration - - type: change - title: Helm chart `dnsConfig` moved to `client.routing`. - body: >- - The Helm chart dnsConfig was deprecated but retained for backward compatibility. 
The fields alsoProxySubnets - and neverProxySubnets can now be found under routing in the client struct. - docs: reference/cluster-config#Routing - - type: change - title: Helm chart `agentInjector.agentImage` moved to `agent.image`. - body: >- - The Helm chart agentInjector.agentImage was moved to agent.image. The old value is deprecated but - retained for backward compatibility. - docs: reference/cluster-config#Image-Configuration - - type: change - title: Helm chart `agentInjector.appProtocolStrategy` moved to `agent.appProtocolStrategy`. - body: >- - The Helm chart agentInjector.appProtocolStrategy was moved to agent.appProtocolStrategy. The old - value is deprecated but retained for backward compatibility. - docs: reference/cluster-config#Application-Protocol-Selection - - type: change - title: Helm chart `dnsServiceName`, `dnsServiceNamespace`, and `dnsServiceIP` removed. - body: >- - The Helm chart dnsServiceName, dnsServiceNamespace, and dnsServiceIP has been removed, because - they are no longer needed. The TUN-device will use the traffic-manager pod-IP on platforms where it needs to - dedicate an IP for its local resolver. - - type: change - title: Quit daemons with `telepresence quit -s` - body: >- - The former options `-u` and `-r` for `telepresence quit` has been deprecated and replaced with one option `-s` which will - quit both the root daemon and the user daemon. - - type: bugfix - title: Environment variable interpolation in pods now works. - body: >- - Environment variable interpolation now works for all definitions that are copied from pod containers - into the injected traffic-agent container. - - type: bugfix - title: Early detection of namespace conflict - body: >- - An attempt to create simultaneous intercepts that span multiple namespace on the same workstation - is detected early and prohibited instead of resulting in failing DNS lookups later on. - - type: bugfix - title: Annoying log message removed - body: >- - Spurious and incorrect ""!! SRV xxx"" messages will no longer appear in the logs when the reason - is normal context cancellation. - - type: bugfix - title: Single name DNS resolution in Docker on Linux host - body: >- - Single label names now resolves correctly when using Telepresence in Docker on a Linux host - - type: bugfix - title: Misnomer `appPortStrategy` in Helm chart renamed to `appProtocolStrategy`. - body: >- - The Helm chart value appProtocolStrategy is now correctly named (used to be appPortStategy) - - version: 2.7.6 - date: "2022-09-16" - notes: - - type: feature - title: Helm chart resource entries for injected agents - body: >- - The resources for the traffic-agent container and the optional init container can be - specified in the Helm chart using the resources and initResource fields - of the agentInjector.agentImage - - type: feature - title: Cluster event propagation when injection fails - body: >- - When the traffic-manager fails to inject a traffic-agent, the cause for the failure is - detected by reading the cluster events, and propagated to the user. - - type: feature - title: FTP-client instead of sshfs for remote mounts - body: >- - Telepresence can now use an embedded FTP client and load an existing FUSE library instead of running - an external sshfs or sshfs-win binary. This feature is experimental in 2.7.x - and enabled by setting intercept.useFtp to true> in the config.yml. 
- - type: change - title: Upgrade of winfsp - body: >- - Telepresence on Windows upgraded winfsp from version 1.10 to 1.11 - - type: bugfix - title: Removal of invalid warning messages - body: >- - Running CLI commands on Apple M1 machines will no longer throw warnings about /proc/cpuinfo - and /proc/self/auxv. - - version: 2.7.5 - date: "2022-09-14" - notes: - - type: change - title: Rollback of release 2.7.4 - body: >- - This release is a rollback of the changes in 2.7.4, so essentially the same as 2.7.3 - - version: 2.7.4 - date: "2022-09-14" - notes: - - type: change - body: >- - This release was broken on some platforms. Use 2.7.6 instead. - - version: 2.7.3 - date: "2022-09-07" - notes: - - type: bugfix - title: PTY for CLI commands - body: >- - CLI commands that are executed by the user daemon now use a pseudo TTY. This enables - docker run -it to allocate a TTY and will also give other commands like bash read the - same behavior as when executed directly in a terminal. - docs: https://github.com/telepresenceio/telepresence/issues/2724 - - type: bugfix - title: Traffic Manager useless warning silenced - body: >- - The traffic-manager will no longer log numerous warnings saying Issuing a - systema request without ApiKey or InstallID may result in an error. - - type: bugfix - title: Traffic Manager useless error silenced - body: >- - The traffic-manager will no longer log an error saying Unable to derive subnets - from nodes when the podCIDRStrategy is auto and it chooses to instead derive the - subnets from the pod IPs. - - version: 2.7.2 - date: "2022-08-25" - notes: - - type: feature - title: Autocompletion scripts - body: >- - Autocompletion scripts can now be generated with telepresence completion SHELL where SHELL can be bash, zsh, fish or powershell. - - type: feature - title: Connectivity check timeout - body: >- - The timeout for the initial connectivity check that Telepresence performs - in order to determine if the cluster's subnets are proxied or not can now be configured - in the config.yml file using timeouts.connectivityCheck. The default timeout was - changed from 5 seconds to 500 milliseconds to speed up the actual connect. - docs: reference/config#timeouts - - type: change - title: gather-traces feedback - body: >- - The command telepresence gather-traces now prints out a message on success. - docs: troubleshooting#distributed-tracing - - type: change - title: upload-traces feedback - body: >- - The command telepresence upload-traces now prints out a message on success. - docs: troubleshooting#distributed-tracing - - type: change - title: gather-traces tracing - body: >- - The command telepresence gather-traces now traces itself and reports errors with trace gathering. - docs: troubleshooting#distributed-tracing - - type: change - title: CLI log level - body: >- - The cli.log log is now logged at the same level as the connector.log - docs: reference/config#log-levels - - type: bugfix - title: Telepresence --help fixed - body: >- - telepresence --help now works once more even if there's no user daemon running. - docs: https://github.com/telepresenceio/telepresence/issues/2735 - - type: bugfix - title: Stream cancellation when no process intercepts - body: >- - Streams created between the traffic-agent and the workstation are now properly closed - when no interceptor process has been started on the workstation. 
This fixes a potential problem where - a large number of attempts to connect to a non-existing interceptor would cause stream congestion - and an unresponsive intercept. - - type: bugfix - title: List command excludes the traffic-manager - body: >- - The telepresence list command no longer includes the traffic-manager deployment. - - version: 2.7.1 - date: "2022-08-10" - notes: - - type: change - title: Reinstate telepresence uninstall - body: >- - Reinstate telepresence uninstall with --everything depreciated - - type: change - title: Reduce telepresence helm uninstall - body: >- - telepresence helm uninstall will only uninstall the traffic-manager helm chart and no longer accepts the --everything, --agent, or --all-agents flags. - - type: bugfix - title: Auto-connect for telepresence intercpet - body: >- - telepresence intercept will attempt to connect to the traffic manager before creating an intercept. - - version: 2.7.0 - date: "2022-08-07" - notes: - - type: feature - title: Distributed Tracing - body: >- - The Telepresence components now collect OpenTelemetry traces. - Up to 10MB of trace data are available at any given time for collection from - components. telepresence gather-traces is a new command that will collect - all that data and place it into a gzip file, and telepresence upload-traces is - a new command that will push the gzipped data into an OTLP collector. - docs: troubleshooting#distributed-tracing - - type: feature - title: Helm install - body: >- - A new telepresence helm command was added to provide an easy way to install, upgrade, or uninstall the telepresence traffic-manager. - docs: install/manager - - type: feature - title: Ignore Volume Mounts - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-ignore-volume-mounts, that can be used to make the injector ignore specified volume mounts denoted by a comma-separated string. - - type: feature - title: telepresence pod-daemon - body: >- - The Docker image now contains a new program in addition to - the existing traffic-manager and traffic-agent: the pod-daemon. The - pod-daemon is a trimmed-down version of the user-daemon that is - designed to run as a sidecar in a Pod, enabling CI systems to create - preview deploys. - - type: feature - title: Prometheus support for traffic manager - body: >- - Added prometheus support to the traffic manager. - - type: change - title: No install on telepresence connect - body: >- - The traffic manager is no longer automatically installed into the cluster. Connecting or creating an intercept in a cluster without a traffic manager will return an error. - docs: install/manager - - type: change - title: Helm Uninstall - body: >- - The command telepresence uninstall has been moved to telepresence helm uninstall. - docs: install/manager - - type: bugfix - title: readOnlyRootFileSystem mounts work - body: >- - Add an emptyDir volume and volume mount under /tmp on the agent sidecar so it works with `readOnlyRootFileSystem: true` - docs: https://github.com/telepresenceio/telepresence/pull/2666 - - version: 2.6.8 - date: "2022-06-23" - notes: - - type: feature - title: Specify Your DNS - body: >- - The name and namespace for the DNS Service that the traffic-manager uses in DNS auto-detection can now be specified. - - type: feature - title: Specify a Fallback DNS - body: >- - Should the DNS auto-detection logic in the traffic-manager fail, users can now specify a fallback IP to use. 
- - type: feature - title: Intercept UDP Ports - body: >- - It is now possible to intercept UDP ports with Telepresence and also use --to-pod to forward UDP traffic from ports on localhost. - - type: change - title: Additional Helm Values - body: >- - The Helm chart will now add the nodeSelector, affinity and tolerations values to the traffic-manager's post-upgrade-hook and pre-delete-hook jobs. - - type: bugfix - title: Agent Injection Bugfix - body: >- - Telepresence no longer fails to inject the traffic agent into the pod generated for workloads that have no volumes and `automountServiceAccountToken: false`. - - version: 2.6.7 - date: "2022-06-22" - notes: - - type: bugfix - title: Persistent Sessions - body: >- - The Telepresence client will remember and reuse the traffic-manager session after a network failure or other reason that caused an unclean disconnect. - - type: bugfix - title: DNS Requests - body: >- - Telepresence will no longer forward DNS requests for "wpad" to the cluster. - - type: bugfix - title: Graceful Shutdown - body: >- - The traffic-agent will properly shut down if one of its goroutines errors. - - version: 2.6.6 - date: "2022-06-09" - notes: - - type: bugfix - title: Env Var `TELEPRESENCE_API_PORT` - body: >- - The propagation of the TELEPRESENCE_API_PORT environment variable now works correctly. - - type: bugfix - title: Double Printing `--output json` - body: >- - The --output json global flag no longer outputs multiple objects - - version: 2.6.5 - date: "2022-06-03" - notes: - - type: feature - title: Helm Option -- `reinvocationPolicy` - body: >- - The reinvocationPolicy of the traffic-agent injector webhook can now be configured using the Helm chart. - docs: install/helm - - type: feature - title: Helm Option -- Proxy Certificate - body: >- - The traffic manager now accepts a root CA for a proxy, allowing it to connect to Ambassador Cloud from behind an HTTPS proxy. This can be configured through the Helm chart. - docs: install/helm - - type: feature - title: Helm Option -- Agent Injection - body: >- - A policy that controls when the mutating webhook injects the traffic-agent was added, and can be configured in the Helm chart. - docs: install/helm - - type: change - title: Windows Tunnel Version Upgrade - body: >- - Telepresence on Windows upgraded wintun.dll from version 0.12 to version 0.14.1 - - type: change - title: Helm Version Upgrade - body: >- - Telepresence upgraded its embedded Helm from version 3.8.1 to 3.9 - - type: change - title: Kubernetes API Version Upgrade - body: >- - Telepresence upgraded its embedded Kubernetes API from version 0.23.4 to 0.24.1 - - type: feature - title: Flag `--watch` Added to `list` Command - body: >- - Added a --watch flag to telepresence list that can be used to watch interceptable workloads in a namespace. - - type: change - title: Deprecated `images.webhookAgentImage` - body: >- - The Telepresence configuration setting for `images.webhookAgentImage` is now deprecated. Use `images.agentImage` instead. - - type: bugfix - title: Default `reinvocationPolicy` Set to Never - body: >- - The reinvocationPolicy of the traffic-agent injector webhook now defaults to Never instead of IfNeeded so that LimitRanges on namespaces can inject a missing resources element into the injected traffic-agent container. - - type: bugfix - title: UDP - body: >- - UDP-based communication with services in the cluster now works as expected.
- - type: bugfix - title: Telepresence `--help` - body: >- - The command help will only show Kubernetes flags on the commands that support them - - type: change - title: Error Count - body: >- - Only the errors from the last session will be considered when counting the number of errors in the log after a command failure. - - version: 2.6.4 - date: "2022-05-23" - notes: - - type: bugfix - title: Upgrade RBAC Permissions - body: >- - The traffic-manager RBAC grants permissions to update services, deployments, replicasets, and statefulsets. Those permissions are needed when the traffic-manager upgrades from versions < 2.6.0 and can be revoked after the upgrade. - - version: 2.6.3 - date: "2022-05-20" - notes: - - type: bugfix - title: Relative Mount Paths - body: >- - The --mount intercept flag now handles relative mount points correctly on non-Windows platforms. Windows still requires the argument to be a drive letter followed by a colon. - - type: bugfix - title: Traffic Agent Config - body: >- - The traffic-agent's configuration updates automatically when services are added, updated, or deleted. - - type: bugfix - title: Container Injection for Numeric Ports - body: >- - Telepresence will now always inject an initContainer when the service's targetPort is numeric - - type: bugfix - title: Matching Services - body: >- - Workloads that have several matching services pointing to the same target port are now handled correctly. - - type: bugfix - title: Unexpected Panic - body: >- - A potential race condition causing a panic when closing a DNS connection is now handled correctly. - - type: bugfix - title: Mount Volume Cleanup - body: >- - A container start would sometimes fail because an old directory remained in a mounted temp volume. - - version: 2.6.2 - date: "2022-05-17" - notes: - - type: bugfix - title: Argo Injection - body: >- - Workloads controlled by other workloads, such as Argo Rollouts, are now injected correctly. - - type: bugfix - title: Agent Port Mapping - body: >- - Multiple services pointing to the same container port no longer result in duplicated ports in an injected pod. - - type: bugfix - title: GRPC Max Message Size - body: >- - The telepresence list command no longer errors out with "grpc: received message larger than max" when listing namespaces with a large number of workloads. - - version: 2.6.1 - date: "2022-05-16" - notes: - - type: bugfix - title: KUBECONFIG environment variable - body: >- - Telepresence will now handle multiple path entries in the KUBECONFIG environment variable correctly. - - version: 2.6.0 - date: "2022-05-13" - notes: - - type: feature - title: Intercept multiple containers in a pod, and multiple ports per container - body: >- - Telepresence can now intercept multiple services and/or service-ports that connect to the same pod. - docs: new-in-2.6#intercept-multiple-containers-and-ports - - type: feature - title: The Traffic Agent sidecar is always injected by the Traffic Manager's mutating webhook - body: >- - The client will no longer modify deployments, replicasets, or statefulsets in order to - inject a Traffic Agent into an intercepted pod. Instead, all injection is now performed by a mutating webhook. As a result, - the client now needs fewer permissions in the cluster. - docs: install/upgrade#important-note-about-upgrading-to-2.6.0 - - type: change - title: Automatic upgrade of Traffic Agents - body: >- - When upgrading, all workloads with injected agents will have their agent "uninstalled" automatically.
- The mutating webhook will then ensure that their pods will receive an updated Traffic Agent. - docs: new-in-2.6#no-more-workload-modifications - - type: change - title: No default image in the Helm chart - body: >- - The helm chart no longer has a default set for the agentInjector.image.name, and unless it's set, the - traffic-manager will ask Ambassador Cloud for the preferred image. - docs: new-in-2.6#smarter-agent - - type: change - title: Upgrade to Helm version 3.8.1 - body: The Telepresence client now uses Helm version 3.8.1 when auto-installing the Traffic Manager. - - type: bugfix - title: Remote mounts will now function correctly with custom securityContext - body: >- - The bug causing permission problems when the Traffic Agent is in a Pod with a custom securityContext has been fixed. - - type: bugfix - title: Improved presentation of flags in CLI help - body: The help for commands that accept Kubernetes flags will now display those flags in a separate group. - - type: bugfix - title: Better termination of process parented by intercept - body: >- - Occasionally an intercept will spawn a command using -- on the command line, often in another console. - When you use telepresence leave or telepresence quit while the intercept with the spawned command is still active, - Telepresence will now terminate that command because it's considered to be parented by the intercept that is being removed. - - version: 2.5.8 - date: "2022-04-27" - notes: - - type: bugfix - title: Folder creation on `telepresence login` - body: >- - Fixed a bug where the telepresence config folder would not be created if the user ran telepresence login before other commands. - - version: 2.5.7 - date: "2022-04-25" - notes: - - type: change - title: RBAC requirements - body: >- - A namespaced traffic-manager will no longer require cluster-wide RBAC. Only Roles and RoleBindings are now used. - - type: bugfix - title: Windows DNS - body: >- - The DNS recursion detector didn't work correctly on Windows, resulting in sporadic failures to resolve names that were resolved correctly at other times. - - type: bugfix - title: Session TTL and Reconnect - body: >- - A telepresence session will now last for 24 hours after the user's last connectivity. If a session expires, the connector will automatically try to reconnect. - - version: 2.5.6 - date: "2022-04-18" - notes: - - type: change - title: Fewer Watchers - body: >- - The Telepresence agent watcher will now only watch namespaces that the user has accessed since the last connect. - - type: bugfix - title: More Efficient `gather-logs` - body: >- - The gather-logs command will no longer send any logs through gRPC. - - version: 2.5.5 - date: "2022-04-08" - notes: - - type: change - title: Traffic Manager Permissions - body: >- - The traffic-manager now requires permissions to read pods across namespaces even if installed with limited permissions - - type: bugfix - title: Linux DNS Cache - body: >- - The DNS resolver used on Linux with systemd-resolved now flushes the cache when the search path changes. - - type: bugfix - title: Automatic Connect Sync - body: >- - The telepresence list command will produce a correct listing even when not preceded by a telepresence connect. - - type: bugfix - title: Disconnect Reconnect Stability - body: >- - The root daemon will no longer get into a bad state when a disconnect is rapidly followed by a new connect.
- - type: bugfix - title: Limit Watched Namespaces - body: >- - The client will now only watch agents from accessible namespaces, and is also constrained to namespaces explicitly mapped using the connect command's --mapped-namespaces flag. - - type: bugfix - title: Limit Namespaces used in `gather-logs` - body: >- - The gather-logs command will only gather traffic-agent logs from accessible namespaces, and is also constrained to namespaces explicitly mapped using the connect command's --mapped-namespaces flag. - - version: 2.5.4 - date: "2022-03-29" - notes: - - type: bugfix - title: Linux DNS Concurrency - body: >- - The DNS fallback resolver on Linux now correctly handles concurrent requests without timing them out - - type: bugfix - title: Non-Functional Flag - body: >- - The --ingress-l5 flag will no longer be forcefully set to equal the --ingress-host flag - - type: bugfix - title: Automatically Remove Failed Intercepts - body: >- - Intercepts that fail to create are now consistently removed to prevent non-working dangling intercepts from sticking around. - - type: bugfix - title: Agent UID - body: >- - The agent container is no longer sensitive to a random UID or a UID imposed by a SecurityContext. - - type: bugfix - title: Gather-Logs Output Filepath - body: >- - Removed a bad concatenation that corrupted the output path of telepresence gather-logs. - - type: change - title: Remove Unnecessary Error Advice - body: >- - The advice to "see logs for details" is no longer printed when the argument count is incorrect in a CLI command. - - type: bugfix - title: Garbage Collection - body: >- - Client and agent sessions no longer leave dangling waiters in the traffic-manager when they depart. - - type: bugfix - title: Limit Gathered Logs - body: >- - The client's gather logs command and agent watcher will now respect the configured grpc.maxReceiveSize - - type: change - title: In-Cluster Checks - body: >- - The TUN device will no longer route pod or service subnets if it is running on a machine that's already connected to the cluster - - type: change - title: Expanded Status Command - body: >- - The status command includes the install id, user id, account id, and user email in its result, and can print output as JSON - - type: change - title: List Command Shows All Intercepts - body: >- - The list command, when used with the --intercepts flag, will list the user's intercepts from all namespaces - - version: 2.5.3 - date: "2022-02-25" - notes: - - type: bugfix - title: TCP Connectivity - body: >- - Fixed a bug in the TCP stack causing timeouts after repeated connects to the same address - - type: feature - title: Linux Binaries - body: >- - Client-side binaries for the arm64 architecture are now available for Linux - - version: 2.5.2 - date: "2022-02-23" - notes: - - type: bugfix - title: DNS server bugfix - body: >- - Fixed a bug where Telepresence would use the last server in resolv.conf - - version: 2.5.1 - date: "2022-02-19" - notes: - - type: bugfix - title: Fix GKE auth issue - body: >- - Fixed a bug where using a GKE cluster would error with: No Auth Provider found for name "gcp" - - version: 2.5.0 - date: "2022-02-18" - notes: - - type: feature - title: Intercept metadata - body: >- - The flag --http-meta can be used to declare metadata key-value pairs that will be returned by the Telepresence REST - API endpoint /intercept-info - docs: reference/restapi#intercept-info - - type: change - title: Client RBAC watch - body: >- - The verb "watch" was added to the set of required verbs when
accessing services and workloads for the client RBAC - ClusterRole - docs: reference/rbac - - type: change - title: Dropped backward compatibility with versions <=2.4.4 - body: >- - Telepresence is no longer backward compatible with versions 2.4.4 or older because the deprecated multiplexing tunnel - functionality was removed. - - type: change - title: No global networking flags - body: >- - The global networking flags are no longer used and using them will render a deprecation warning unless they are supported by the - command. The subcommands that support networking flags are connect, current-cluster-id, - and genyaml. - - type: bugfix - title: Output of status command - body: >- - The also-proxy and never-proxy subnets are now displayed correctly when using the - telepresence status command. - - type: bugfix - title: SETENV sudo privilege no longer needed - body: >- - Telepresence longer requires SETENV privileges when starting the root daemon. - - type: bugfix - title: Network device names containing dash - body: >- - Telepresence will now parse device names containing dashes correctly when determining routes that it should never block. - - type: bugfix - title: Linux uses cluster.local as domain instead of search - body: >- - The cluster domain (typically "cluster.local") is no longer added to the DNS search on Linux using - systemd-resolved. Instead, it is added as a domain so that names ending with it are routed - to the DNS server. - - version: 2.4.11 - date: "2022-02-10" - notes: - - type: change - title: Add additional logging to troubleshoot intermittent issues with intercepts - body: >- - We've noticed some issues with intercepts in v2.4.10, so we are releasing a version - with enhanced logging to help debug and fix the issue. - - version: 2.4.10 - date: "2022-01-13" - notes: - - type: feature - title: New --http-plaintext option - body: >- - The flag --http-plaintext can be used to ensure that an intercept uses plaintext http or grpc when - communicating with the workstation process. - docs: reference/intercepts/#tls - - type: feature - title: Configure the default intercept port - body: >- - The port used by default in the telepresence intercept command (8080), can now be changed by setting - the intercept.defaultPort in the config.yml file. - docs: reference/config/#intercept - - type: change - title: Telepresence CI now uses Github Actions - body: >- - Telepresence now uses Github Actions for doing unit and integration testing. It is - now easier for contributors to run tests on PRs since maintainers can add an - "ok to test" label to PRs (including from forks) to run integration tests. - docs: https://github.com/telepresenceio/telepresence/actions - image: telepresence-2.4.10-actions.png - - type: bugfix - title: Check conditions before asking questions - body: >- - User will not be asked to log in or add ingress information when creating an intercept until a check has been - made that the intercept is possible. - docs: reference/intercepts/ - - type: bugfix - title: Fix invalid log statement - body: >- - Telepresence will no longer log invalid: "unhandled connection control message: code DIAL_OK" errors. - - type: bugfix - title: Log errors from sshfs/sftp - body: >- - Output to stderr from the traffic-agent's sftp and the client's sshfs processes - are properly logged as errors. 
- - type: bugfix - title: Don't use Windows path separators in workload pod template - body: >- - Auto installer will no longer not emit backslash separators for the /tel-app-mounts paths in the - traffic-agent container spec when running on Windows. - - version: 2.4.9 - date: "2021-12-09" - notes: - - type: bugfix - title: Helm upgrade nil pointer error - body: >- - A helm upgrade using the --reuse-values flag no longer fails on a "nil pointer" error caused by a nil - telpresenceAPI value. - docs: install/helm#upgrading-the-traffic-manager - - version: 2.4.8 - date: "2021-12-03" - notes: - - type: feature - title: VPN diagnostics tool - body: >- - There is a new subcommand, test-vpn, that can be used to diagnose connectivity issues with a VPN. - See the VPN docs for more information on how to use it. - docs: reference/vpn - image: telepresence-2.4.8-vpn.png - - - type: feature - title: RESTful API service - body: >- - A RESTful service was added to Telepresence, both locally to the client and to the traffic-agent to - help determine if messages with a set of headers should be consumed or not from a message queue where the - intercept headers are added to the messages. - docs: reference/restapi - image: telepresence-2.4.8-health-check.png - - - type: change - title: TELEPRESENCE_LOGIN_CLIENT_ID env variable no longer used - body: >- - You could previously configure this value, but there was no reason to change it, so the value - was removed. - - - type: bugfix - title: Tunneled network connections behave more like ordinary TCP connections. - body: >- - When using Telepresence with an external cloud provider for extensions, those tunneled - connections now behave more like TCP connections, especially when it comes to timeouts. - We've also added increased testing around these types of connections. - - version: 2.4.7 - date: "2021-11-24" - notes: - - type: feature - title: Injector service-name annotation - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-service-name, that can be used to set the name of the service to be intercepted. - This will help disambiguate which service to intercept for when a workload is exposed by multiple services, such as can happen with Argo Rollouts - docs: reference/cluster-config#service-name-annotation - - type: feature - title: Skip the Ingress Dialogue - body: >- - You can now skip the ingress dialogue by setting the ingress parameters in the corresponding flags. - docs: reference/intercepts#skipping-the-ingress-dialogue - - type: feature - title: Never proxy subnets - body: >- - The kubeconfig extensions now support a never-proxy argument, - analogous to also-proxy, that defines a set of subnets that - will never be proxied via telepresence. - docs: reference/config#neverproxy - - type: change - title: Daemon versions check - body: >- - Telepresence now checks the versions of the client and the daemons and asks the user to quit and restart if they don't match. - - type: change - title: No explicit DNS flushes - body: >- - Telepresence DNS now uses a very short TTL instead of explicitly flushing DNS by killing the mDNSResponder or doing resolvectl flush-caches - docs: reference/routing#dns-caching - - type: bugfix - title: Legacy flags now work with global flags - body: >- - Legacy flags such as --swap-deployment can now be used together with global flags. - - type: bugfix - title: Outbound connection closing - body: >- - Outbound connections are now properly closed when the peer closes. 
- - type: bugfix - title: Prevent DNS recursion - body: >- - The DNS-resolver will trap recursive resolution attempts (may happen when the cluster runs in a docker-container on the client). - docs: reference/routing#dns-recursion - - type: bugfix - title: Prevent network recursion - body: >- - The TUN-device will trap failed connection attempts that results in recursive calls back into the TUN-device (may happen when the - cluster runs in a docker-container on the client). - docs: reference/routing#connect-recursion - - type: bugfix - title: Traffic Manager deadlock fix - body: >- - The Traffic Manager no longer runs a risk of entering a deadlock when a new Traffic agent arrives. - - type: bugfix - title: webhookRegistry config propagation - body: >- - The configured webhookRegistry is now propagated to the webhook installer even if no webhookAgentImage has been set. - docs: reference/config#images - - type: bugfix - title: Login refreshes expired tokens - body: >- - When a user's token has expired, telepresence login - will prompt the user to log in again to get a new token. Previously, - the user had to telepresence quit and telepresence logout - to get a new token. - docs: https://github.com/telepresenceio/telepresence/issues/2062 - - version: 2.4.6 - date: "2021-11-02" - notes: - - type: feature - title: Manually injecting Traffic Agent - body: >- - Telepresence now supports manually injecting the traffic-agent YAML into workload manifests. - Use the genyaml command to create the sidecar YAML, then add the telepresence.getambassador.io/manually-injected: "true" annotation to your pods to allow Telepresence to intercept them. - docs: reference/intercepts/manual-agent - - - type: feature - title: Telepresence CLI released for Apple silicon - body: >- - Telepresence is now built and released for Apple silicon. - docs: install/?os=macos - - - type: change - title: Telepresence help text now links to telepresence.io - body: >- - We now include a link to our documentation when you run telepresence --help. This will make it easier - for users to find this page whether they acquire Telepresence through Brew or some other mechanism. - image: telepresence-2.4.6-help-text.png - - - type: bugfix - title: Fixed bug when API server is inside CIDR range of pods/services - body: >- - If the API server for your kubernetes cluster had an IP that fell within the - subnet generated from pods/services in a kubernetes cluster, it would proxy traffic - to the API server which would result in hanging or a failed connection. We now ensure - that the API server is explicitly not proxied. - - version: 2.4.5 - date: "2021-10-15" - notes: - - type: feature - title: Get pod yaml with gather-logs command - body: >- - Adding the flag --get-pod-yaml to your request will get the - pod yaml manifest for all kubernetes components you are getting logs for - ( traffic-manager and/or pods containing a - traffic-agent container). This flag is set to false - by default. - docs: reference/client - image: telepresence-2.4.5-pod-yaml.png - - - type: feature - title: Anonymize pod name + namespace when using gather-logs command - body: >- - Adding the flag --anonymize to your command will - anonymize your pod names + namespaces in the output file. We replace the - sensitive names with simple names (e.g. pod-1, namespace-2) to maintain - relationships between the objects without exposing the real names of your - objects. This flag is set to false by default. 
- docs: reference/client - image: telepresence-2.4.5-logs-anonymize.png - - - type: feature - title: Support for intercepting headless services - body: >- - Intercepting headless services is now officially supported. You can request a - headless service on whatever port it exposes and get a response from the - intercept. This leverages the same approach as intercepting numeric ports when - using the mutating webhook injector, mainly requires the initContainer - to have NET_ADMIN capabilities. - docs: reference/intercepts/#intercepting-headless-services - - - type: change - title: Use one tunnel per connection instead of multiplexing into one tunnel - body: >- - We have changed Telepresence so that it uses one tunnel per connection instead - of multiplexing all connections into one tunnel. This will provide substantial - performance improvements. Clients will still be backwards compatible with older - managers that only support multiplexing. - - - type: bugfix - title: Added checks for Telepresence kubernetes compatibility - body: >- - Telepresence currently works with Kubernetes server versions 1.17.0 - and higher. We have added logs in the connector and traffic-manager - to let users know when they are using Telepresence with a cluster it doesn't support. - docs: reference/cluster-config - - - type: bugfix - title: Traffic Agent security context is now only added when necessary - body: >- - When creating an intercept, Telepresence will now only set the traffic agent's GID - when strictly necessary (i.e. when using headless services or numeric ports). This mitigates - an issue on openshift clusters where the traffic agent can fail to be created due to - openshift's security policies banning arbitrary GIDs. - - - version: 2.4.4 - date: "2021-09-27" - notes: - - type: feature - title: Numeric ports in agent injector - body: >- - The agent injector now supports injecting Traffic Agents into pods that have unnamed ports. - docs: reference/cluster-config/#note-on-numeric-ports - - - type: feature - title: New subcommand to gather logs and export into zip file - body: >- - Telepresence has logs for various components (the - traffic-manager, traffic-agents, the root and - user daemons), which are integral for understanding and debugging - Telepresence behavior. We have added the telepresence - gather-logs command to make it simple to compile logs for - all Telepresence components and export them in a zip file that can - be shared to others and/or included in a github issue. For more - information on usage, run telepresence gather-logs --help - . - docs: reference/client - image: telepresence-2.4.4-gather-logs.png - - - type: feature - title: Pod CIDR strategy is configurable in Helm chart - body: >- - Telepresence now enables you to directly configure how to get - pod CIDRs when deploying Telepresence with the Helm chart. - The default behavior remains the same. We've also introduced - the ability to explicitly set what the pod CIDRs should be. - docs: install/helm - - - type: bugfix - title: Compute pod CIDRs more efficiently - body: >- - When computing subnets using the pod CIDRs, the traffic-manager - now uses less CPU cycles. - docs: reference/routing/#subnets - - - type: bugfix - title: Prevent busy loop in traffic-manager - body: >- - In some circumstances, the traffic-manager's CPU - would max out and get pinned at its limit. This required a - shutdown or pod restart to fix. We've added some fixes - to prevent the traffic-manager from getting into this state. 
- - - type: bugfix - title: Added a fixed buffer size to TUN-device - body: >- - The TUN-device now has a max buffer size of 64K. This prevents the - buffer from growing limitlessly until it receives a PSH, which could - be a blocking operation when receiving lots of TCP-packets. - docs: reference/tun-device - - - type: bugfix - title: Fix hanging user daemon - body: >- - When Telepresence encountered an issue connecting to the cluster or - the root daemon, it could hang indefinitely. It will now error correctly - when it encounters that situation. - - - type: bugfix - title: Improved proprietary agent connectivity - body: >- - To determine whether the cluster environment is air-gapped, the - proprietary agent attempts to connect to the cloud during startup. - To deal with a possible initial failure, the agent backs off - and retries the connection with an increasing backoff duration. - - - type: bugfix - title: Telepresence correctly reports intercept port conflict - body: >- - When creating a second intercept targeting the same local port, - it now gives the user an informative error message. Additionally, - it tells them which intercept is currently using that port to make - it easier to remedy. - - - version: 2.4.3 - date: "2021-09-15" - notes: - - type: feature - title: Environment variable TELEPRESENCE_INTERCEPT_ID available in interceptor's environment - body: >- - When you perform an intercept, we now include a TELEPRESENCE_INTERCEPT_ID environment - variable in the environment. - - - type: bugfix - title: Improved daemon stability - body: >- - Fixed a timing bug that sometimes caused a "daemon did not start" failure. - - - type: bugfix - title: Complete logs for Windows - body: >- - Crash stack traces and other errors were incorrectly not written to log files. This has - been fixed so logs for Windows should be at parity with the ones on macOS and Linux. - - - type: bugfix - title: Log rotation fix for Linux kernel 4.11+ - body: >- - On Linux kernel 4.11 and above, the log file rotation now properly reads the - birth-time of the log file. Older kernels continue to use the old behavior - of using the change-time in place of the birth-time. - - - type: bugfix - title: Improved error messaging - body: >- - When Telepresence encounters an error, it tells the user where they should look for - logs related to the error. We have refined this so that it only tells users to look - for errors in the daemon logs for issues that are logged there. - - - type: bugfix - title: Stop resolving localhost - body: >- - When using the overriding DNS resolver, it will no longer apply search paths when - resolving localhost, since that should be resolved on the user's machine - instead of the cluster. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Variable cluster domain - body: >- - Previously, the cluster domain was hardcoded to cluster.local. While this - is true for many Kubernetes clusters, it is not for all of them. Now this value is - retrieved from the traffic-manager. - - - type: bugfix - title: Improved cleanup of traffic-agents - body: >- - Telepresence now uninstalls traffic-agents installed via mutating webhook - when using telepresence uninstall --everything. - - - type: bugfix - title: More large file transfer fixes - body: >- - Downloading large files during an intercept will no longer cause timeouts and hanging - traffic-agents.
- - - type: bugfix - title: Setting --mount to false when intercepting works as expected - body: >- - When using --mount=false while performing an intercept, the file system - was still mounted. This has been remedied so the intercept behavior respects the - flag. - docs: reference/volume - - - type: bugfix - title: Traffic-manager establishes outbound connections in parallel - body: >- - Previously, the traffic-manager established outbound connections - sequentially. This meant that slow (and failing) Dial calls would - block all outbound traffic from the workstation (for up to 30 seconds). We now - establish these connections in parallel so that won't occur. - docs: reference/routing/#outbound - - - type: bugfix - title: Status command reports correct DNS settings - body: >- - Telepresence status now correctly reports DNS settings for all operating - systems, instead of Local IP:nil, Remote IP:nil when they don't exist. - - - version: 2.4.2 - date: "2021-09-01" - notes: - - type: feature - title: New subcommand to temporarily change log-level - body: >- - We have added a new telepresence loglevel subcommand that enables users - to temporarily change the log-level for the local daemons, the traffic-manager and - the traffic-agents. While the logLevels settings from the config will - still be used by default, this can be helpful if you are currently experiencing an issue and - want to have higher fidelity logs, without doing a telepresence quit and - telepresence connect. You can use telepresence loglevel --help to get - more information on options for the command. - docs: reference/config - - - type: change - title: All components have info as the default log-level - body: >- - We've now set the default for all components of Telepresence (traffic-agent, - traffic-manager, local daemons) to use info as the default log-level. - - - type: bugfix - title: Updating RBAC in helm chart to fix cluster-id regression - body: >- - In 2.4.1, we enabled the traffic-manager to get the cluster ID by getting the UID - of the default namespace. The helm chart was not updated to give the traffic-manager - those permissions, which has since been fixed. This impacted users who use licensed features of - the Telepresence extension in an air-gapped environment. - docs: reference/cluster-config/#air-gapped-cluster - - - type: bugfix - title: Timeouts for Helm actions are now respected - body: >- - The user-defined timeout for Helm actions wasn't always respected, causing the daemon to hang - indefinitely when failing to install the traffic-manager. - docs: reference/config#timeouts - - - version: 2.4.1 - date: "2021-08-30" - notes: - - type: feature - title: External cloud variables are now configurable - body: >- - We now support configuring the host and port for the cloud in your config.yml. These - are used when logging in to utilize features provided by an extension, and are also passed - along as environment variables when installing the traffic-manager. Additionally, we - now run our testsuite with these variables set to localhost to continue to ensure Telepresence - is fully functional without depending on an external service. The SYSTEMA_HOST and SYSTEMA_PORT - environment variables are no longer used. - image: telepresence-2.4.1-systema-vars.png - docs: reference/config/#cloud - - - type: feature - title: Helm chart can now regenerate certificate used for mutating webhook on-demand.
- body: >- - You can now set agentInjector.certificate.regenerate when deploying Telepresence - with the Helm chart to automatically regenerate the certificate used by the agent injector webhook. - docs: install/helm - - - type: change - title: Traffic Manager installed via helm - body: >- - The traffic-manager is now installed via an embedded version of the Helm chart when telepresence connect is first performed on a cluster. - This change is transparent to the user. - A new configuration flag, timeouts.helm sets the timeouts for all helm operations performed by the Telepresence binary. - docs: reference/config#timeouts - - - type: change - title: traffic-manager gets cluster ID itself instead of via environment variable - body: >- - The traffic-manager used to get the cluster ID as an environment variable when running - telepresence connnect or via adding the value in the helm chart. This was - clunky so now the traffic-manager gets the value itself as long as it has permissions - to "get" and "list" namespaces (this has been updated in the helm chart). - docs: install/helm - - - type: bugfix - title: Telepresence now mounts all directories from /var/run/secrets - body: >- - In the past, we only mounted secret directories in /var/run/secrets/kubernetes.io. - We now mount *all* directories in /var/run/secrets, which, for example, includes - directories like eks.amazonaws.com used for IRSA tokens. - docs: reference/volume - - - type: bugfix - title: Max gRPC receive size correctly propagates to all grpc servers - body: >- - This fixes a bug where the max gRPC receive size was only propagated to some of the - grpc servers, causing failures when the message size was over the default. - docs: reference/config/#grpc - - - type: bugfix - title: Updated our Homebrew packaging to run manually - body: >- - We made some updates to our script that packages Telepresence for Homebrew so that it - can be run manually. This will enable maintainers of Telepresence to run the script manually - should we ever need to rollback a release and have latest point to an older verison. - docs: install/ - - - type: bugfix - title: Telepresence uses namespace from kubeconfig context on each call - body: >- - In the past, Telepresence would use whatever namespace was specified in the kubeconfig's current-context - for the entirety of the time a user was connected to Telepresence. This would lead to confusing behavior - when a user changed the context in their kubeconfig and expected Telepresence to acknowledge that change. - Telepresence now will do that and use the namespace designated by the context on each call. - - - type: bugfix - title: Idle outbound TCP connections timeout increased to 7200 seconds - body: >- - Some users were noticing that their intercepts would start failing after 60 seconds. - This was because the keep idle outbound TCP connections were set to 60 seconds, which we have - now bumped to 7200 seconds to match Linux's tcp_keepalive_time default. - - - type: bugfix - title: Telepresence will automatically remove a socket upon ungraceful termination - body: >- - When a Telepresence process terminates ungracefully, it would inform users that "this usually means - that the process has terminated ungracefully" and implied that they should remove the socket. We've - now made it so Telepresence will automatically attempt to remove the socket upon ungraceful termination. 
- - - type: bugfix - title: Fixed user daemon deadlock - body: >- - Remedied a situation where the user daemon could hang when a user was logged in. - - - type: bugfix - title: Fixed agentImage config setting - body: >- - The config setting images.agentImages is no longer required to contain the repository, and it - will use the value at images.repository. - docs: reference/config/#images - - - version: 2.4.0 - date: "2021-08-04" - notes: - - type: feature - title: Windows Client Developer Preview - body: >- - There is now a native Windows client for Telepresence that is being released as a Developer Preview. - All the same features supported by the MacOS and Linux client are available on Windows. - image: telepresence-2.4.0-windows.png - docs: install - - - type: feature - title: CLI raises helpful messages from Ambassador Cloud - body: >- - Telepresence can now receive messages from Ambassador Cloud and raise - them to the user when they perform certain commands. This enables us - to send you messages that may enhance your Telepresence experience when - using certain commands. Frequency of messages can be configured in your - config.yml. - image: telepresence-2.4.0-cloud-messages.png - docs: reference/config#cloud - - - type: bugfix - title: Improved stability of systemd-resolved-based DNS - body: >- - When initializing the systemd-resolved-based DNS, the routing domain - is set to improve stability in non-standard configurations. This also enables the - overriding resolver to do a proper take over once the DNS service ends. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Fixed an edge case when intercepting a container with multiple ports - body: >- - When specifying a port of a container to intercept, if there was a container in the - pod without ports, it was automatically selected. This has been fixed so we'll only - choose the container with "no ports" if there's no container that explicitly matches - the port used in your intercept. - docs: reference/intercepts/#creating-an-intercept-when-a-service-has-multiple-ports - - - type: bugfix - title: $(NAME) references in agent's environments are now interpolated correctly. - body: >- - If you had an environment variable $(NAME) in your workload that referenced another, intercepts - would not correctly interpolate $(NAME). This has been fixed and works automatically. - - - type: bugfix - title: Telepresence no longer prints INFO message when there is no config.yml - body: >- - Fixed a regression that printed an INFO message to the terminal when there wasn't a - config.yml present. The config is optional, so this message has been - removed. - docs: reference/config - - - type: bugfix - title: Telepresence no longer panics when using --http-match - body: >- - Fixed a bug where Telepresence would panic if the value passed to --http-match - didn't contain an equal sign, which has been fixed. The correct syntax is in the --help - string and looks like --http-match=HTTP2_HEADER=REGEX - docs: reference/intercepts/#intercept-behavior-when-logged-in-to-ambassador-cloud - - - type: bugfix - title: Improved subnet updates - body: >- - The traffic-manager used to update subnets whenever the Nodes or Pods changed, even if - the underlying subnet hadn't changed, which created a lot of unnecessary traffic between the - client and the traffic-manager. This has been fixed so we only send updates when the subnets - themselves actually change. 
- docs: reference/routing/#subnets - - - version: 2.3.7 - date: "2021-07-23" - notes: - - type: feature - title: Also-proxy in telepresence status - body: >- - An also-proxy entry in the Kubernetes cluster config will - show up in the output of the telepresence status command. - docs: reference/config - - - type: feature - title: Non-interactive telepresence login - body: >- - telepresence login now has an - --apikey=KEY flag that allows for - non-interactive logins. This is useful for headless - environments where launching a web-browser is impossible, - such as cloud shells, Docker containers, or CI. - image: telepresence-2.3.7-newkey.png - docs: reference/client/login/ - - - type: bugfix - title: Mutating webhook injector correctly hides named ports for probes. - body: >- - The mutating webhook injector has been fixed to correctly rename named ports for liveness and readiness probes - docs: reference/cluster-config - - - type: bugfix - title: telepresence current-cluster-id crash fixed - body: >- - Fixed a regression introduced in 2.3.5 that caused telepresence current-cluster-id - to crash. - docs: reference/cluster-config - - - type: bugfix - title: Better UX around intercepts with no local process running - body: >- - Requests would hang indefinitely when initiating an intercept before you - had a local process running. This has been fixed and will result in an - Empty reply from server until you start a local process. - docs: reference/intercepts - - - type: bugfix - title: API keys no longer show as "no description" - body: >- - New API keys generated internally for communication with - Ambassador Cloud no longer show up as "no description" in - the Ambassador Cloud web UI. Existing API keys generated by - older versions of Telepresence will still show up this way. - image: telepresence-2.3.7-keydesc.png - - - type: bugfix - title: Fix corruption of user-info.json - body: >- - Fixed a race condition that logging in and logging out - rapidly could cause memory corruption or corruption of the - user-info.json cache file used when - authenticating with Ambassador Cloud. - - - type: bugfix - title: Improved DNS resolver for systemd-resolved - body: - Telepresence's systemd-resolved-based DNS resolver is now more - stable and in case it fails to initialize, the overriding resolver - will no longer cause general DNS lookup failures when telepresence defaults to - using it. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Faster telepresence list command - body: - The performance of telepresence list has been increased - significantly by reducing the number of calls the command makes to the cluster. - docs: reference/client - - - version: 2.3.6 - date: "2021-07-20" - notes: - - type: bugfix - title: Fix subnet discovery - body: >- - Fixed a regression introduced in 2.3.5 where the Traffic - Manager's RoleBinding did not correctly appoint - the traffic-manager Role, causing - subnet discovery to not be able to work correctly. - docs: reference/rbac/ - - - type: bugfix - title: Fix root-user configuration loading - body: >- - Fixed a regression introduced in 2.3.5 where the root daemon - did not correctly read the configuration file; ignoring the - user's configured log levels and timeouts. 
- docs: reference/config/ - - - type: bugfix - title: Fix a user daemon crash - body: >- - Fixed an issue that could cause the user daemon to crash - during shutdown, as during shutdown it unconditionally - attempted to close a channel even though the channel might - already be closed. - - - version: 2.3.5 - date: "2021-07-15" - notes: - - type: feature - title: traffic-manager in multiple namespaces - body: >- - We now support installing multiple traffic managers in the same cluster. - This will allow operators to install deployments of telepresence that are - limited to certain namespaces. - image: ./telepresence-2.3.5-traffic-manager-namespaces.png - docs: install/helm - - type: feature - title: No more dependence on kubectl - body: >- - Telepresence no longer depends on having an external - kubectl binary, which might not be present for - OpenShift users (who have oc instead of - kubectl). - - type: feature - title: Max gRPC receive size now configurable - body: >- - The default max size of messages received through gRPC (4 MB) is sometimes insufficient. It can now be configured. - image: ./telepresence-2.3.5-grpc-max-receive-size.png - docs: reference/config/#grpc - - type: feature - title: CLI can be used in air-gapped environments - body: >- - While Telepresence will auto-detect if your cluster is in an air-gapped environment, - we've added an option users can add to their config.yml to ensure the cli acts like it - is in an air-gapped environment. Air-gapped environments require a manually installed - licence. - docs: reference/cluster-config/#air-gapped-cluster - image: ./telepresence-2.3.5-skipLogin.png - - version: 2.3.4 - date: "2021-07-09" - notes: - - type: bugfix - title: Improved IP log statements - body: >- - Some log statements were printing incorrect characters, when they should have been IP addresses. - This has been resolved to include more accurate and useful logging. - docs: reference/config/#log-levels - image: ./telepresence-2.3.4-ip-error.png - - type: bugfix - title: Improved messaging when multiple services match a workload - body: >- - If multiple services matched a workload when performing an intercept, Telepresence would crash. - It now gives the correct error message, instructing the user on how to specify which - service the intercept should use. - image: ./telepresence-2.3.4-improved-error.png - docs: reference/intercepts - - type: bugfix - title: Traffic-manger creates services in its own namespace to determine subnet - body: >- - Telepresence will now determine the service subnet by creating a dummy-service in its own - namespace, instead of the default namespace, which was causing RBAC permissions issues in - some clusters. - docs: reference/routing/#subnets - - type: bugfix - title: Telepresence connect respects pre-existing clusterrole - body: >- - When Telepresence connects, if the traffic-manager's desired clusterrole already exists in the - cluster, Telepresence will no longer try to update the clusterrole. - docs: reference/rbac - - type: bugfix - title: Helm Chart fixed for clientRbac.namespaced - body: >- - The Telepresence Helm chart no longer fails when installing with --set clientRbac.namespaced=true. - docs: install/helm - - version: 2.3.3 - date: "2021-07-07" - notes: - - type: feature - title: Traffic Manager Helm Chart - body: >- - Telepresence now supports installing the Traffic Manager via Helm. 
- This will make it easy for operators to install and configure the - server-side components of Telepresence separately from the CLI (which - in turn allows for better separation of permissions). - image: ./telepresence-2.3.3-helm.png - docs: install/helm/ - - type: feature - title: Traffic-manager in custom namespace - body: >- - As the traffic-manager can now be installed in any - namespace via Helm, Telepresence can now be configured to look for the - Traffic Manager in a namespace other than ambassador. - This can be configured on a per-cluster basis. - image: ./telepresence-2.3.3-namespace-config.png - docs: reference/config - - type: feature - title: Intercept --to-pod - body: >- - telepresence intercept now supports a - --to-pod flag that can be used to port-forward sidecars' - ports from an intercepted pod. - image: ./telepresence-2.3.3-to-pod.png - docs: reference/intercepts - - type: change - title: Change in migration from edgectl - body: >- - Telepresence no longer automatically shuts down the old - api_version=1 edgectl daemon. If migrating - from such an old version of edgectl you must now manually - shut down the edgectl daemon before running Telepresence. - This was already the case when migrating from the newer - api_version=2 edgectl. - - type: bugfix - title: Fixed error during shutdown - body: >- - The root daemon no longer terminates when the user daemon disconnects - from its gRPC streams, and instead waits to be terminated by the CLI. - This could cause problems with things not being cleaned up correctly. - - type: bugfix - title: Intercepts will survive deletion of intercepted pod - body: >- - An intercept will survive deletion of the intercepted pod provided - that another pod is created (or already exists) that can take over. - - version: 2.3.2 - date: "2021-06-18" - notes: - # Headliners - - type: feature - title: Service Port Annotation - body: >- - The mutator webhook for injecting traffic-agents now - recognizes a - telepresence.getambassador.io/inject-service-port - annotation to specify which port to intercept; bringing the - functionality of the --port flag to users who - use the mutator webook in order to control Telepresence via - GitOps. - image: ./telepresence-2.3.2-svcport-annotation.png - docs: reference/cluster-config#service-port-annotation - - type: feature - title: Outbound Connections - body: >- - Outbound connections are now routed through the intercepted - Pods which means that the connections originate from that - Pod from the cluster's perspective. This allows service - meshes to correctly identify the traffic. - docs: reference/routing/#outbound - - type: change - title: Inbound Connections - body: >- - Inbound connections from an intercepted agent are now - tunneled to the manager over the existing gRPC connection, - instead of establishing a new connection to the manager for - each inbound connection. This avoids interference from - certain service mesh configurations. - docs: reference/routing/#inbound - - # RBAC changes - - type: change - title: Traffic Manager needs new RBAC permissions - body: >- - The Traffic Manager requires RBAC - permissions to list Nodes, Pods, and to create a dummy - Service in the manager's namespace. - docs: reference/routing/#subnets - - type: change - title: Reduced developer RBAC requirements - body: >- - The on-laptop client no longer requires RBAC permissions to list the Nodes - in the cluster or to create Services, as that functionality - has been moved to the Traffic Manager. 
- - # Bugfixes - - type: bugfix - title: Able to detect subnets - body: >- - Telepresence will now detect the Pod CIDR ranges even if - they are not listed in the Nodes. - image: ./telepresence-2.3.2-subnets.png - docs: reference/routing/#subnets - - type: bugfix - title: Dynamic IP ranges - body: >- - The list of cluster subnets that the virtual network - interface will route is now configured dynamically and will - follow changes in the cluster. - - type: bugfix - title: No duplicate subnets - body: >- - Subnets fully covered by other subnets are now pruned - internally and thus never superfluously added to the - laptop's routing table. - docs: reference/routing/#subnets - - type: change # not a bugfix, but it only makes sense to mention after the above bugfixes - title: Change in default timeout - body: >- - The trafficManagerAPI timeout default has - changed from 5 seconds to 15 seconds, in order to facilitate - the extended time it takes for the traffic-manager to do its - initial discovery of cluster info as a result of the above - bugfixes. - - type: bugfix - title: Removal of DNS config files on macOS - body: >- - On macOS, files generated under - /etc/resolver/ as the result of using - include-suffixes in the cluster config are now - properly removed on quit. - docs: reference/routing/#macos-resolver - - - type: bugfix - title: Large file transfers - body: >- - Telepresence no longer erroneously terminates connections - early when sending a large HTTP response from an intercepted - service. - - type: bugfix - title: Race condition in shutdown - body: >- - When shutting down the user-daemon or root-daemon on the - laptop, telepresence quit and related commands - no longer return early before everything is fully shut down. - Now it can be counted on that by the time the command has - returned that all of the side-effects on the laptop have - been cleaned up. - - version: 2.3.1 - date: "2021-06-14" - notes: - - title: DNS Resolver Configuration - body: "Telepresence now supports per-cluster configuration for custom dns behavior, which will enable users to determine which local + remote resolver to use and which suffixes should be ignored + included. These can be configured on a per-cluster basis." - image: ./telepresence-2.3.1-dns.png - docs: reference/config - type: feature - - title: AlsoProxy Configuration - body: "Telepresence now supports also proxying user-specified subnets so that they can access external services only accessible to the cluster while connected to Telepresence. These can be configured on a per-cluster basis and each subnet is added to the TUN device so that requests are routed to the cluster for IPs that fall within that subnet." - image: ./telepresence-2.3.1-alsoProxy.png - docs: reference/config - type: feature - - title: Mutating Webhook for Injecting Traffic Agents - body: "The Traffic Manager now contains a mutating webhook to automatically add an agent to pods that have the telepresence.getambassador.io/traffic-agent: enabled annotation. This enables Telepresence to work well with GitOps CD platforms that rely on higher level kubernetes objects matching what is stored in git. 
For workloads without the annotation, Telepresence will add the agent the way it has in the past" - image: ./telepresence-2.3.1-inject.png - docs: reference/rbac - type: feature - - title: Traffic Manager Connect Timeout - body: "The trafficManagerConnect timeout default has changed from 20 seconds to 60 seconds, in order to facilitate the extended time it takes to apply everything needed for the mutator webhook." - image: ./telepresence-2.3.1-trafficmanagerconnect.png - docs: reference/config - type: change - - title: Fix for large file transfers - body: "Fix a tun-device bug where sometimes large transfers from services on the cluster would hang indefinitely" - image: ./telepresence-2.3.1-large-file-transfer.png - docs: reference/tun-device - type: bugfix - - title: Brew Formula Changed - body: "Now that the Telepresence rewrite is the main version of Telepresence, you can install it via Brew like so: brew install datawire/blackbird/telepresence." - image: ./telepresence-2.3.1-brew.png - docs: install/ - type: change - - version: 2.3.0 - date: "2021-06-01" - notes: - - title: Brew install Telepresence - body: "Telepresence can now be installed via brew on macOS, which makes it easier for users to stay up-to-date with the latest telepresence version. To install via brew, you can use the following command: brew install datawire/blackbird/telepresence2." - image: ./telepresence-2.3.0-homebrew.png - docs: install/ - type: feature - - title: TCP and UDP routing via Virtual Network Interface - body: "Telepresence will now perform routing of outbound TCP and UDP traffic via a Virtual Network Interface (VIF). The VIF is a layer 3 TUN-device that exists while Telepresence is connected. It makes the subnets in the cluster available to the workstation and will also route DNS requests to the cluster and forward them to intercepted pods. This means that pods with custom DNS configuration will work as expected. Prior versions of Telepresence would use firewall rules and were only capable of routing TCP." - image: ./tunnel.jpg - docs: reference/tun-device - type: feature - - title: SSH is no longer used - body: "All traffic between the client and the cluster is now tunneled via the traffic manager gRPC API. This means that Telepresence no longer uses ssh tunnels and that the manager no longer have an sshd installed. Volume mounts are still established using sshfs but it is now configured to communicate using the sftp-protocol directly, which means that the traffic agent also runs without sshd. A desired side effect of this is that the manager and agent containers no longer need a special user configuration." - image: ./no-ssh.png - docs: reference/tun-device/#no-ssh-required - type: change - - title: Running in a Docker container - body: "Telepresence can now be run inside a Docker container. This can be useful for avoiding side effects on a workstation's network, establishing multiple sessions with the traffic manager, or working with different clusters simultaneously." - image: ./run-tp-in-docker.png - docs: reference/inside-container - type: feature - - title: Configurable Log Levels - body: "Telepresence now supports configuring the log level for Root Daemon and User Daemon logs. This provides control over the nature and volume of information that Telepresence generates in daemon.log and connector.log." 
- image: ./telepresence-2.3.0-loglevels.png - docs: reference/config/#log-levels - type: feature - - version: 2.2.2 - date: "2021-05-17" - notes: - - title: Legacy Telepresence subcommands - body: Telepresence is now able to translate common legacy Telepresence commands into native Telepresence commands. So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the new Telepresence binary. - image: ./telepresence-2.2.png - docs: install/migrate-from-legacy/ - type: feature diff --git a/docs/2.14/versions.yml b/docs/2.14/versions.yml deleted file mode 100644 index 18468449..00000000 --- a/docs/2.14/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.14.0" -dlVersion: "v2.14.0" -docsVersion: "2.14" -branch: release/v2 -productName: "Telepresence OSS" diff --git a/docs/latest b/docs/latest new file mode 120000 index 00000000..94d4153c --- /dev/null +++ b/docs/latest @@ -0,0 +1 @@ +v2.19 \ No newline at end of file diff --git a/docs/latest/community.md b/docs/latest/community.md deleted file mode 100644 index 922457c9..00000000 --- a/docs/latest/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/latest/faqs.md b/docs/latest/faqs.md deleted file mode 100644 index 0e12bf0c..00000000 --- a/docs/latest/faqs.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. 
- -** What operating systems does Telepresence work on?** - -Telepresence currently works natively on macOS (Intel and Apple Silicon), Linux, and Windows. - -** What protocols can be intercepted by Telepresence?** - -Both TCP and UDP are supported for global intercepts. - -Personal intercepts require HTTP. All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](https://www.getambassador.io/feedback/) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. - -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - - - - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes, but it doesn't need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the traffic-manager and traffic-agent images that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon unless it runs in a Docker container?** - -The local daemon needs sudo to create a VIF (Virtual Network Interface) for outbound routing and DNS. Root access is needed to do that unless the daemon runs in a Docker container. - -** What components get installed in the cluster when running Telepresence?** - -A single `traffic-manager` service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted all pods associated with this workload will be restarted with the Traffic Agent automatically injected. 
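Tying together the connection and DNS answers above, here is a minimal sketch, assuming a hypothetical Service named `example-service` in the `default` namespace listening on port 8080; the service name, namespace, port, and path are placeholders, not values from any real cluster:

```console
# Connect to the cluster using your current kubeconfig context:
$ telepresence connect

# Any Service is now resolvable by its namespace-qualified DNS name:
$ curl -i http://example-service.default:8080/mypath

# After intercepting a service in a namespace, its short name resolves as well:
$ telepresence intercept example-service -n default
$ curl -i http://example-service:8080/mypath
```

The same qualified-name form works for databases and middleware running in the cluster, such as MySQL, PostgreSQL, and RabbitMQ.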
- -** How can I remove all the Telepresence components installed within my cluster?** - -You can run the command `telepresence helm uninstall` to remove everything from the cluster, including the `traffic-manager`, and all the `traffic-agent` containers injected into each pod being intercepted. - -Also run `telepresence quit -s` to stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established by using -the `kubectl port-forward` machinery (though without actually spawning -a separate program) to establish a TLS encrypted connection to Telepresence -Traffic Manager in the cluster, and running Telepresence's custom VPN -protocol over that connection. - - - -** Is Telepresence OSS open source?** - -Yes it is! You can find its source code on [GitHub](https://github.com/telepresenceio/telepresence). - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](https://www.getambassador.io/feedback/), or you can [join our Slack channel](https://slack.cncf.io/) to share your thoughts. diff --git a/docs/latest/quick-start/TelepresenceQuickStartLanding.js b/docs/latest/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 9395b9cb..00000000 --- a/docs/latest/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,61 +0,0 @@ -import React from 'react'; -import Icon from '../../../../../src/components/Icon'; -import Link from '../../../../../src/components/Link'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -const TelepresenceQuickStartLanding = () => { - - return ( -
-      Telepresence OSS
-      Set up your ideal development environment for Kubernetes in seconds.
-      Accelerate your inner development loop with hot reload using your
-      existing IDE and workflow.
-      Install Telepresence and connect to your Kubernetes workloads.
-      Get Started
-      What Can Telepresence Do for You?
-      Telepresence gives Kubernetes application developers:
-      • Make changes on the fly and see them reflected when interacting with your remote Kubernetes environment. This is just like hot reloading, but it works across both local and remote environments.
-      • Query services and microservice APIs that are only accessible in your remote cluster's network.
-      • Set breakpoints in your IDE and re-route remote traffic to your local machine to investigate bugs with realistic user traffic and API calls.
-      LEARN MORE
- ); -}; - -export default TelepresenceQuickStartLanding; diff --git a/docs/latest/quick-start/index.md b/docs/latest/quick-start/index.md deleted file mode 100644 index b75dfa2f..00000000 --- a/docs/latest/quick-start/index.md +++ /dev/null @@ -1,177 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Telepresence Quickstart - -Telepresence is an open source tool that enables you to set up remote development environments for Kubernetes where you can still use all of your favorite local tools like IDEs, debuggers, and profilers. - -## Prerequisites - - - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), the Kubernetes command-line tool, or the OpenShift Container Platform command-line interface, [oc](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). - - A Kubernetes Deployment and Service. - - - - **Don’t have access to Kubernetes cluster?** Try Telepresence in a free remote Kubernetes cluster without having to mess with your production environment. [Get Started >](https://app.getambassador.io/cloud/welcome?select=developer&utm_source=telepresence&utm_medium=website&utm_campaign=quickstart). - - - -## Install Telepresence on Your Machine - -Install Telepresence by running the relevant commands below for your OS. If you are not the administrator of your cluster, you will need [administrative RBAC permissions](https://www.getambassador.io/docs/telepresence-oss/latest/reference/rbac#administrating-telepresence) to install and use the Telepresence traffic-manager in your cluster. - - - - -```shell -# Intel Macs - -# 1. Download the latest binary (~105 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-amd64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# 1. Download the latest binary (~101 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-arm64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~95 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-linux-amd64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -Installing Telepresence on Windows is easy. Download this [zip folder](https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-windows-amd64.zip) and run the powershell script. - - - -## Install Telepresence in Your Cluster - -1. Install the traffic manager into your cluster with `telepresence helm install`. More information about installing Telepresence can be found [here](https://www.getambassador.io/docs/telepresence-oss/latest/install/manager). This will require root access on your machine. - -``` -$ telepresence helm install -... 
-Traffic Manager installed successfully -``` - -## Intercept Your Service - -With Telepresence, you can create [global intercepts](https://www.getambassador.io/docs/telepresence-oss/latest/concepts/intercepts?intercept=global) that intercept all traffic going to a service in your remote cluster and route it to your local environment instead. - -1. Connect to your cluster with `telepresence connect` and connect to the Kubernetes API server: - - ``` - $ telepresence connect - connected to context - - ``` - - ```console - $ curl -ik https://kubernetes.default - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - The 401 response is expected when you first connect. - - - You now have access to your remote Kubernetes API server as if you were on the same network. You can now use any local tools to connect to any service in the cluster. - -2. Enter `telepresence list` and make sure the service you want to intercept is listed. For example: - - ``` - $ telepresence list - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -3. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example: - - ```console - $ kubectl get service example-service --output yaml - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -4. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - - - For `--port`: specify the port the local instance of your service is running on. If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon. - - For `--env-file`: specify a file path for Telepresence to write the environment variables that are set in the pod. - The example below shows Telepresence intercepting traffic going to service `example-service`. Requests now reach the service on port `http` in the cluster get routed to `8080` on the workstation and write the environment variables of the service to `~/example-service-intercept.env`. - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -5. Start your local environment using the environment variables retrieved in the previous step. - -The following are some examples of how to pass the environment variables to your local process: - -- **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#env). -- **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. -- **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Query the environment in which you intercepted a service and verify your local instance being invoked. - All the traffic previously routed to your Kubernetes Service is now routed to your local environment - -## 🎉 You've Unlocked a Faster Development Workflow for Kubernetes with Telepresence - -Now, with Telepresence, you can: - --
- Make changes on the fly and see them reflected when interacting with your remote Kubernetes environment. This is just like hot reloading, but it works across both local and remote environments.
- Query services and microservice APIs that are only accessible in your remote cluster's network.
- Set breakpoints in your IDE and re-route remote traffic to your local machine to investigate bugs with realistic user traffic and API calls.
- - - - **Didn't work?** Make sure the port you're listening on matches the one you specified when you created your intercept. - - - -## What’s Next? -- [Learn about the Telepresence architecture.](https://www.getambassador.io/docs/telepresence-oss/latest/reference/architecture) - - \ No newline at end of file diff --git a/docs/latest/quick-start/qs-cards.js b/docs/latest/quick-start/qs-cards.js deleted file mode 100644 index 5b68aa4a..00000000 --- a/docs/latest/quick-start/qs-cards.js +++ /dev/null @@ -1,71 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import { Link as GatsbyLink } from 'gatsby'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
-        Collaborating
-        Use preview URLs to collaborate with your colleagues and others
-        outside of your organization.
-        Outbound Sessions
-        While connected to the cluster, your laptop can interact with
-        services as if it were another pod in the cluster.
-        FAQs
-        Learn more about use cases and the technical implementation of
-        Telepresence.
- ); -} diff --git a/docs/latest/quick-start/telepresence-quickstart-landing.less b/docs/latest/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index e2a83df4..00000000 --- a/docs/latest/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,152 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.doc-body .telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: -8.4px auto 48px; - max-width: 1050px; - min-width: @docs-min-width; - width: 100%; - - h1 { - color: @blue-dark; - font-weight: normal; - letter-spacing: 0.25px; - font-size: 33px; - margin: 0 0 15px; - } - p { - font-size: 0.875rem; - line-height: 24px; - margin: 0; - padding: 0; - } - - .demo-cluster-container { - display: grid; - margin: 40px 0; - grid-template-columns: 1fr; - grid-template-columns: 1fr; - @media screen and (max-width: 900px) { - grid-template-columns: repeat(1, 1fr); - } - } - .main-title-container { - display: flex; - flex-direction: column; - align-items: center; - p { - text-align: center; - font-size: 0.875rem; - } - } - h2 { - font-size: 23px; - color: @black; - margin: 0 0 20px 0; - padding: 0; - &.underlined { - padding-bottom: 2px; - border-bottom: 3px solid @grey-separator; - text-align: center; - } - strong { - font-weight: 800; - } - &.subtitle { - margin-bottom: 10px; - font-size: 19px; - line-height: 28px; - } - } - .learn-more, - .get-started { - font-size: 14px; - font-weight: 600; - letter-spacing: 1.25px; - display: flex; - align-items: center; - text-decoration: none; - &.inline { - display: inline-block; - text-decoration: underline; - font-size: unset; - font-weight: normal; - &:hover { - text-decoration: none; - } - } - &.blue { - color: @blue-5; - } - &.blue:hover { - color: @blue-dark; - } - } - - .learn-more { - margin-top: 20px; - padding: 13px 0; - } - - .box-container { - &.border { - border: 1.5px solid @grey-separator; - border-radius: 5px; - padding: 10px; - } - &::before { - content: ''; - position: absolute; - width: 14px; - height: 14px; - border-radius: 50%; - top: 0; - left: 50%; - transform: translate(-50%, -50%); - } - p { - font-size: 0.875rem; - line-height: 24px; - padding: 0; - } - } - - .telepresence-video { - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 18px; - h2.telepresence-video-title { - font-weight: 400; - font-size: 23px; - line-height: 33px; - color: @blue-6; - } - } - - .video-section { - display: grid; - grid-template-columns: 1fr 1fr; - column-gap: 20px; - @media screen and (max-width: 800px) { - grid-template-columns: 1fr; - } - ul { - font-size: 14px; - margin: 0 10px 6px 0; - } - .video-container { - position: relative; - padding-bottom: 56.25%; // 16:9 aspect ratio - height: 0; - iframe { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - } - } - } -} diff --git a/docs/latest/redirects.yml b/docs/latest/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/latest/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/latest/releaseNotes.yml b/docs/latest/releaseNotes.yml deleted file mode 100644 index 46a862d0..00000000 --- a/docs/latest/releaseNotes.yml +++ /dev/null @@ -1,2345 +0,0 @@ -# This file should be placed in the folder for the version of the -# product that's meant to be documented. A `/release-notes` page will -# be automatically generated and populated at build time. 
-# -# Note that an entry needs to be added to the `doc-links.yml` file in -# order to surface the release notes in the table of contents. -# -# The YAML in this file should contain: -# -# changelog: An (optional) URL to the CHANGELOG for the product. -# items: An array of releases with the following attributes: -# - version: The (optional) version number of the release, if applicable. -# - date: The date of the release in the format YYYY-MM-DD. -# - notes: An array of noteworthy changes included in the release, each having the following attributes: -# - type: The type of change, one of `bugfix`, `feature`, `security` or `change`. -# - title: A short title of the noteworthy change. -# - body: >- -# Two or three sentences describing the change and why it -# is noteworthy. This is HTML, not plain text or -# markdown. It is handy to use YAML's ">-" feature to -# allow line-wrapping. -# - image: >- -# The URL of an image that visually represents the -# noteworthy change. This path is relative to the -# `release-notes` directory; if this file is -# `FOO/releaseNotes.yml`, then the image paths are -# relative to `FOO/release-notes/`. -# - docs: The path to the documentation page where additional information can be found. -# - href: A path from the root to a resource on the getambassador website, takes precedence over a docs link. - -docTitle: Telepresence Release Notes -docDescription: >- - Release notes for Telepresence by Ambassador Labs, a CNCF project - that enables developers to iterate rapidly on Kubernetes - microservices by arming them with infinite-scale development - environments, access to instantaneous feedback loops, and highly - customizable development environments. - -changelog: https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md - -items: - - version: 2.15.0 - date: "2023-08-29" - notes: - - type: security - title: Add ASLR to telepresence binaries - body: >- - ASLR hardens binary sercurity against fixed memory attacks. - - type: feature - title: Added client builds for arm64 architecture. - body: >- - Updated the release workflow files in github actions to including building and publishing the client binaries for arm64 architecture. - docs: https://github.com/telepresenceio/telepresence/issues/3259 - - type: bugfix - title: KUBECONFIG env var can now be used with the docker mode. - body: >- - If provided, the KUBECONFIG environment variable was passed to the kubeauth-foreground service as a parameter. - However, since it didn't exist, the CLI was throwing an error when using telepresence connect --docker. - docs: https://github.com/telepresenceio/telepresence/pull/3300 - - type: bugfix - title: Fix deadlock while watching workloads - body: >- - The telepresence list --output json-stream wasn't releasing the session's lock after being - stopped, including with a telepresence quit. The user could be blocked as a result. - docs: https://github.com/telepresenceio/telepresence/pull/3298 - - type: bugfix - title: Change json output of telepresence list command - body: >- - Replace deprecated info in the JSON output of the telepresence list command. - - version: 2.14.4 - date: "2023-08-21" - notes: - - type: bugfix - title: Nil pointer exception when upgrading the traffic-manager. 
- body: >- - Upgrading the traffic-manager using telepresence helm upgrade would sometimes - result in a helm error message executing "telepresence/templates/intercept-env-configmap.yaml" - at <.Values.intercept.environment.excluded>: nil pointer evaluating interface {}.excluded" - docs: https://github.com/telepresenceio/telepresence/issues/3313 - - version: 2.14.2 - date: "2023-07-26" - notes: - - type: bugfix - title: Telepresence now use the OSS agent in its latest version by default. - body: >- - The traffic manager admin was forced to set it manually during the chart installation. - docs: https://github.com/telepresenceio/telepresence/issues/3271 - - version: 2.14.1 - date: "2023-07-07" - notes: - - type: feature - title: Envoy's http idle timout is now configurable. - body: >- - A new agent.helm.httpIdleTimeout setting was added to the Helm chart that controls - the proprietary Traffic agent's http idle timeout. The default of one hour, which in some situations - would cause a lot of resource consuming and lingering connections, was changed to 70 seconds. - - type: feature - title: Add more gauges to the Traffic manager's Prometheus client. - body: >- - Several gauges were added to the Prometheus client to make it easier to monitor - what the Traffic manager spends resources on. - - type: feature - title: Agent Pull Policy - body: >- - Add option to set traffic agent pull policy in helm chart. - - type: bugfix - title: Resource leak in the Traffic manager. - body: >- - Fixes a resource leak in the Traffic manager caused by lingering tunnels between the clients and - Traffic agents. The tunnels are now closed correctly when terminated from the side that created them. - - type: bugfix - title: Fixed problem setting traffic manager namespace using the kubeconfig extension. - body: >- - Fixes a regression introduced in version 2.10.5, making it impossible to set the traffic-manager namespace - using the telepresence.io kubeconfig extension. - docs: https://www.getambassador.io/docs/telepresence/latest/reference/config#manager - - version: 2.14.0 - date: "2023-06-12" - notes: - - type: feature - title: DNS configuration now supports excludes and mappings. - body: >- - The DNS configuration now supports two new fields, excludes and mappings. The excludes field allows you to - exclude a given list of hostnames from resolution, while the mappings field can be used to resolve a hostname with - another. - docs: https://github.com/telepresenceio/telepresence/pull/3172 - - - type: feature - title: Added the ability to exclude environment variables - body: >- - Added a new config map that can take an array of environment variables that will - then be excluded from an intercept that retrieves the environment of a pod. - - - type: bugfix - title: Fixed traffic-agent backward incompatibility issue causing lack of remote mounts - body: >- - A traffic-agent of version 2.13.3 (or 1.13.15) would not propagate the directories under - /var/run/secrets when used with a traffic manager older than 2.13.3. - - - type: bugfix - title: Fixed race condition causing segfaults on rare occasions when a tunnel stream timed out. - body: >- - A context cancellation could sometimes be trapped in a stream reader, causing it to incorrectly return - an undefined message which in turn caused the parent reader to panic on a nil pointer reference. - docs: https://github.com/telepresenceio/telepresence/pull/2963 - - - type: change - title: Routing conflict reporting. 
- body: >- - Telepresence will now attempt to detect and report routing conflicts with other running VPN software on client machines. - There is a new configuration flag that can be tweaked to allow certain CIDRs to be overridden by Telepresence. - - - type: change - title: test-vpn command deprecated - body: >- - Running telepresence test-vpn will now print a deprecation warning and exit. The command will be removed in a future release. - Instead, please configure telepresence for your VPN's routes. - - version: 2.13.3 - date: "2023-05-25" - notes: - - type: feature - title: Add imagePullSecrets to hooks - body: >- - Add .Values.hooks.curl.imagePullSecrets and .Values.hooks curl.imagePullSecrets to Helm values. - docs: https://github.com/telepresenceio/telepresence/pull/3079 - - - type: change - title: Change reinvocation policy to Never for the mutating webhook - body: >- - The default setting of the reinvocationPolicy for the mutating webhook dealing with agent injections changed from Never to IfNeeded. - - - type: bugfix - title: Fix mounting fail of IAM roles for service accounts web identity token - body: >- - The eks.amazonaws.com/serviceaccount volume injected by EKS is now exported and remotely mounted during an intercept. - docs: https://github.com/telepresenceio/telepresence/issues/3166 - - - type: bugfix - title: Correct namespace selector for cluster versions with non-numeric characters - body: >- - The mutating webhook now correctly applies the namespace selector even if the cluster version contains non-numeric characters. For example, it can now handle versions such as Major:"1", Minor:"22+". - docs: https://github.com/telepresenceio/telepresence/pull/3184 - - - type: bugfix - title: Enable IPv6 on the telepresence docker network - body: >- - The "telepresence" Docker network will now propagate DNS AAAA queries to the Telepresence DNS resolver when it runs in a Docker container. - docs: https://github.com/telepresenceio/telepresence/issues/3179 - - - type: bugfix - title: Fix the crash when intercepting with --local-only and --docker-run - body: >- - Running telepresence intercept --local-only --docker-run no longer results in a panic. - docs: https://github.com/telepresenceio/telepresence/issues/3171 - - - type: bugfix - title: Fix incorrect error message with local-only mounts - body: >- - Running telepresence intercept --local-only --mount false no longer results in an incorrect error message saying "a local-only intercept cannot have mounts". - docs: https://github.com/telepresenceio/telepresence/issues/3171 - - - type: bugfix - title: specify port in hook urls - body: >- - The helm chart now correctly handles custom agentInjector.webhook.port that was not being set in hook URLs. - docs: https://github.com/telepresenceio/telepresence/pull/3161 - - - type: bugfix - title: Fix wrong default value for disableGlobal and agentArrival - body: >- - Params .intercept.disableGlobal and .timeouts.agentArrival are now correctly honored. - - - version: 2.13.2 - date: "2023-05-12" - notes: - - type: bugfix - title: Authenticator Service Update - body: >- - Replaced / characters with a - when the authenticator service creates the kubeconfig in the Telepresence cache. - docs: https://github.com/telepresenceio/telepresence/pull/3167 - - - type: bugfix - title: Enhanced DNS Search Path Configuration for Windows (Auto, PowerShell, and Registry Options) - body: >- - Configurable strategy (auto, powershell. or registry) to set the global DNS search path on Windows. 
Default is auto which means try powershell first, and if it fails, fall back to registry. - docs: https://github.com/telepresenceio/telepresence/pull/3154 - - - type: feature - title: Configurable Traffic Manager Timeout in values.yaml - body: >- - The timeout for the traffic manager to wait for traffic agent to arrive can now be configured in the values.yaml file using timeouts.agentArrival. The default timeout is still 30 seconds. - docs: https://github.com/telepresenceio/telepresence/pull/3148 - - - type: bugfix - title: Enhanced Local Cluster Discovery for macOS and Windows - body: >- - The automatic discovery of a local container based cluster (minikube or kind) used when the Telepresence daemon runs in a container, now works on macOS and Windows, and with different profiles, ports, and cluster names - docs: https://github.com/telepresenceio/telepresence/pull/3165 - - - type: bugfix - title: FTP Stability Improvements - body: >- - Multiple simultaneous intercepts can transfer large files in bidirectionally and in parallel. - docs: https://github.com/telepresenceio/telepresence/pull/3157 - - - type: bugfix - title: Intercepted Persistent Volume Pods No Longer Cause Timeouts - body: >- - Pods using persistent volumes no longer causes timeouts when intercepted. - docs: https://github.com/telepresenceio/telepresence/pull/3157 - - - type: bugfix - title: Successful 'Telepresence Connect' Regardless of DNS Configuration - body: >- - Ensure that `telepresence connect`` succeeds even though DNS isn't configured correctly. - docs: https://github.com/telepresenceio/telepresence/pull/3154 - - - type: bugfix - title: Traffic-Manager's 'Close of Closed Channel' Panic Issue - body: >- - The traffic-manager would sometimes panic with a "close of closed channel" message and exit. - docs: https://github.com/telepresenceio/telepresence/pull/3160 - - - type: bugfix - title: Traffic-Manager's Type Cast Panic Issue - body: >- - The traffic-manager would sometimes panic and exit after some time due to a type cast panic. - docs: https://github.com/telepresenceio/telepresence/pull/3153 - - - type: bugfix - title: Login Friction - body: >- - Improve login behavior by clearing the saved intermediary API Keys when a user logins to force Telepresence to generate new ones. - - - version: 2.13.1 - date: "2023-04-20" - notes: - - type: change - title: Update ambassador-telepresence-agent to version 1.13.13 - body: >- - The malfunction of the Ambassador Telepresence Agent occurred as a result of an update which compressed the executable file. - - - version: 2.13.0 - date: "2023-04-18" - notes: - - type: feature - title: Better kind / minikube network integration with docker - body: >- - The Docker network used by a Kind or Minikube (using the "docker" driver) installation, is automatically detected and connected to a Docker container running the Telepresence daemon. - docs: https://github.com/telepresenceio/telepresence/pull/3104 - - - type: feature - title: New mapped namespace output - body: >- - Mapped namespaces are included in the output of the telepresence status command. - - - type: feature - title: Setting of the target IP of the intercept - docs: reference/intercepts/cli - body: >- - There's a new --address flag to the intercept command allowing users to set the target IP of the intercept. - - - type: feature - title: Multi-tenant support - body: >- - The client will no longer need cluster wide permissions when connected to a namespace scoped Traffic Manager. 
- - - type: bugfix - title: Cluster domain resolution bugfix - body: >- - The Traffic Manager now uses a fail-proof way to determine the cluster domain. - docs: https://github.com/telepresenceio/telepresence/issues/3114 - - - type: bugfix - title: Windows DNS - body: >- - DNS on windows is more reliable and performant. - docs: https://github.com/telepresenceio/telepresence/issues/2939 - - - type: bugfix - title: Agent injection with huge amount of deployments - body: >- - The agent is now correctly injected even with a high number of deployment starting at the same time. - docs: https://github.com/telepresenceio/telepresence/issues/3025 - - - type: bugfix - title: Self-contained kubeconfig with Docker - body: >- - The kubeconfig is made self-contained before running Telepresence daemon in a Docker container. - docs: https://github.com/telepresenceio/telepresence/issues/3099 - - - type: bugfix - title: Version command error - body: >- - The version command won't throw an error anymore if there is no kubeconfig file defined. - docs: https://github.com/telepresenceio/telepresence/issues/3095 - - - version: 2.12.2 - date: "2023-04-04" - notes: - - type: security - title: Update Golang build version to 1.20.3 - body: >- - Update Golang to 1.20.3 to address CVE-2023-24534, CVE-2023-24536, CVE-2023-24537, and CVE-2023-24538 - - version: 2.12.1 - date: "2023-03-22" - notes: - - type: feature - title: Additions to gather-logs - body: >- - Telepresence now includes the kubeauth logs when running - the gather-logs command - - type: bugfix - title: Environment Variables are now propagated to kubeauth - body: >- - Telepresence now propagates environment variables properly - to the kubeauth-foreground to be used with cluster authentication - - version: 2.12.0 - date: "2023-03-20" - notes: - - type: feature - title: Check for service connectivity independently from pod connectivity - body: >- - Telepresence now enables you to check for a service and pod's connectivity independently, so that it can proxy one without proxying the other. - docs: https://github.com/telepresenceio/telepresence/issues/2911 - - type: bugfix - title: Fix cluster authentication when running the telepresence daemon in a docker container. - body: >- - Authentication to EKS and GKE clusters have been fixed (k8s >= v1.26) - docs: https://github.com/telepresenceio/telepresence/pull/3055 - - type: bugfix - body: >- - Telepresence will not longer panic when a CNAME does not contain the .svc in it - title: Fix panic when CNAME of kubernetes.default doesn't contain .svc - docs: https://github.com/telepresenceio/telepresence/issues/3015 - - version: 2.11.1 - date: "2023-02-27" - notes: - - type: bugfix - title: Multiple architectures - docs: https://github.com/telepresenceio/telepresence/issues/3043 - body: >- - The multi-arch build for the ambassador-telepresence-manager and ambassador-telepresence-agent now - works for both amd64 and arm64. - - type: bugfix - title: Ambassador agent Helm chart duplicates - docs: https://github.com/telepresenceio/telepresence/issues/3046 - body: >- - Some labels in the Helm chart for the Ambassador Agent were duplicated, causing problems for FluxCD. - - version: 2.11.0 - date: "2023-02-22" - notes: - - type: feature - title: Support for arm64 (Apple Silicon) - body: >- - The ambassador-telepresence-manager and ambassador-telepresence-agent are now distributed as - multi-architecture images and can run natively on both linux/amd64 and linux/arm64. 
- - type: bugfix - title: Connectivity check can break routing in VPN setups - docs: https://github.com/telepresenceio/telepresence/issues/3006 - body: >- - The connectivity check failed to recognize that the connected peer wasn't a traffic-manager. Consequently, - it didn't proxy the cluster because it incorrectly assumed that a successful connect meant cluster connectivity, - - type: bugfix - title: VPN routes not detected by telepresence test-vpn on macOS - docs: https://github.com/telepresenceio/telepresence/pull/3038 - body: >- - The telepresence test-vpn did not include routes of type link when checking for subnet - conflicts. - - version: 2.10.5 - date: "2023-02-06" - notes: - - type: bugfix - title: Daemon reconnection fix - body: >- - Fixed a bug that prevented the local daemons from automatically reconnecting to the traffic manager when the network connection was lost. - - version: 2.10.4 - date: "2023-01-20" - notes: - - type: bugfix - title: Backward compatibility restored - body: >- - Telepresence can now create intercepts with traffic-managers of version 2.9.5 and older. - - version: 2.10.2 - date: "2023-01-16" - notes: - - type: bugfix - title: version consistency in helm commands - body: >- - Ensure that CLI and user-daemon binaries are the same version when running
telepresence helm install - or telepresence helm upgrade. - docs: https://github.com/telepresenceio/telepresence/pull/2975 - - type: bugfix - title: Release Process - body: >- - Fixed an issue that prevented the --use-saved-intercept flag from working. - - version: 2.10.1 - date: "2023-01-11" - notes: - - type: bugfix - title: Release Process - body: >- - Fixed a regex in our release process that prevented 2.10.0 promotion. - - version: 2.10.0 - date: "2023-01-11" - notes: - - type: feature - title: Added `insert` and `upgrade` Subcommands to `telepresence helm` - body: >- - The `telepresence helm` sub-commands `insert` and `upgrade` now accepts all types of helm `--set-XXX` flags. - - type: feature - title: Added Image Pull Secrets to Helm Chart - body: >- - Image pull secrets for the traffic-agent can now be added using the Helm chart setting `agent.image.pullSecrets`. - - type: change - title: Rename Configmap - body: >- - The configmap `traffic-manager-clients` has been renamed to `traffic-manager`. - - type: change - title: Webhook Namespace Field - body: >- - If the cluster is Kubernetes 1.21 or later, the mutating webhook will find the correct namespace using the label `kubernetes.io/metadata.name` rather than `app.kuberenetes.io/name`. - docs: https://github.com/telepresenceio/telepresence/issues/2913 - - type: change - title: Rename Webhook - body: >- - The name of the mutating webhook now contains the namespace of the traffic-manager so that the webhook is easier to identify when there are multiple namespace scoped telepresence installations in the cluster. - - type: change - title: OSS Binaries - body: >- - The OSS Helm chart is no longer pushed to the datawire Helm repository. It will instead be pushed from the telepresence proprietary repository. The OSS Helm chart is still what's embedded in the OSS telepresence client. - docs: https://github.com/telepresenceio/telepresence/pull/2943 - - type: bugfix - title: Fix Panic Using `--docker-run` - body: >- - Telepresence no longer panics when `--docker-run` is combined with `--name ` instead of `--name=`. - docs: https://github.com/telepresenceio/telepresence/issues/2953 - - type: bugfix - title: Stop assuming cluster domain - body: >- - Telepresence traffic-manager extracts the cluster domain (e.g. "cluster.local") using a CNAME lookup for "kubernetes.default" instead of "kubernetes.default.svc". - docs: https://github.com/telepresenceio/telepresence/pull/2959 - - type: bugfix - title: Uninstall hook timeout - body: >- - A timeout was added to the pre-delete hook `uninstall-agents`, so that a helm uninstall doesn't hang when there is no running traffic-manager. - docs: https://github.com/telepresenceio/telepresence/pull/2937 - - type: bugfix - title: Uninstall hook check - body: >- - The `Helm.Revision` is now used to prevent that Helm hook calls are served by the wrong revision of the traffic-manager. - docs: https://github.com/telepresenceio/telepresence/issues/2954 - - version: 2.9.5 - date: "2022-12-08" - notes: - - type: security - title: Update to golang v1.19.4 - body: >- - Apply security updates by updating to golang v1.19.4 - docs: https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU - - type: bugfix - title: GCE authentication - body: >- - Fixed a regression, that was introduced in 2.9.3, preventing use of gce authentication without also having a config element present in the gce configuration in the kubeconfig. 
- - version: 2.9.4 - date: "2022-12-02" - notes: - - type: feature - title: Subnet detection strategy - body: >- - The traffic-manager can automatically detect that the node subnets are different from the pod subnets, and switch detection strategy to instead use subnets that cover the pod IPs. - - type: bugfix - title: Fix `--set` flag for `telepresence helm install` - body: >- - The `telepresence helm` command `--set x=y` flag didn't correctly set values of other types than `string`. The code now uses standard Helm semantics for this flag. - - type: bugfix - title: Fix `agent.image` setting propigation - body: >- - Telepresence now uses the correct `agent.image` properties in the Helm chart when copying agent image settings from the `config.yml` file. - - type: bugfix - title: Delay file sharing until needed - body: >- - Initialization of FTP type file sharing is delayed, so that setting it using the Helm chart value `intercept.useFtp=true` works as expected. - - type: bugfix - title: Cleanup on `telepresence quit` - body: >- - The port-forward that is created when Telepresence connects to a cluster is now properly closed when `telepresence quit` is called. - - type: bugfix - title: Watch `config.yml` without panic - body: >- - The user daemon no longer panics when the `config.yml` is modified at a time when the user daemon is running but no session is active. - - type: bugfix - title: Thread safety - body: >- - Fix race condition that would occur when `telepresence connect` `telepresence leave` was called several times in rapid succession. - - version: 2.9.3 - date: "2022-11-23" - notes: - - type: feature - title: Helm options for `livenessProbe` and `readinessProbe` - body: >- - The helm chart now supports `livenessProbe` and `readinessProbe` for the traffic-manager deployment, so that the pod automatically restarts if it doesn't respond. - - type: change - title: Improved network communication - body: >- - The root daemon now communicates directly with the traffic-manager instead of routing all outbound traffic through the user daemon. - - type: bugfix - title: Root daemon debug logging - body: >- - Using `telepresence loglevel LEVEL` now also sets the log level in the root daemon. - - type: bugfix - title: Multivalue flag value propagation - body: >- - Multi valued kubernetes flags such as `--as-group` are now propagated correctly. - - type: bugfix - title: Root daemon stability - body: >- - The root daemon would sometimes hang indefinitely when quit and connect were called in rapid succession. - - type: bugfix - title: Base DNS resolver - body: >- - Don't use `systemd.resolved` base DNS resolver unless cluster is proxied. - - version: 2.9.2 - date: "2022-11-16" - notes: - - type: bugfix - title: Fix panic - body: >- - Fix panic when connecting to an older traffic-manager. - - type: bugfix - title: Fix header flag - body: >- - Fix an issue where the `http-header` flag sometimes wouldn't propagate correctly. - - version: 2.9.1 - date: "2022-11-16" - notes: - - type: bugfix - title: Connect failures due to missing auth provider. - body: >- - The regression in 2.9.0 that caused a `no Auth Provider found for name “gcp”` error when connecting was fixed. - - version: 2.9.0 - date: "2022-11-15" - notes: - - type: feature - title: New command to view client configuration. - body: >- - A new telepresence config view was added to make it easy to view the current - client configuration. 
- docs: new-in-2.9#view-the-client-configuration - - type: feature - title: Configure Clients using the Helm chart. - body: >- - The traffic-manager can now configure all clients that connect through the client: map in - the values.yaml file. - docs: reference/cluster-config#client-configuration - - type: feature - title: The Traffic manager version is more visible. - body: >- - The command telepresence version will now include the version of the traffic manager when - the client is connected to a cluster. - - type: feature - title: Command output in YAML format. - body: >- - The global --output flag now accepts both yaml and json. - docs: new-in-2.9#yaml-output - - type: change - title: Deprecated status command flag - body: >- - The telepresence status --json flag is deprecated. Use telepresence status --output=json instead. - - type: bugfix - title: Unqualified service name resolution in docker. - body: >- - Unqualified service names now resolves OK from the docker container when using telepresence intercept --docker-run. - docs: https://github.com/telepresenceio/telepresence/issues/2870 - - type: bugfix - title: Output no longer mixes plaintext and json. - body: >- - Informational messages that don't really originate from the command, such as "Launching Telepresence Root Daemon", - or "An update of telepresence ...", are discarded instead of being printed as plain text before the actual formatted - output when using the --output=json. - docs: https://github.com/telepresenceio/telepresence/issues/2854 - - type: bugfix - title: No more panic when invalid port names are detected. - body: >- - A `telepresence intercept` of services with invalid port no longer causes a panic. - docs: https://github.com/telepresenceio/telepresence/issues/2880 - - type: bugfix - title: Proper errors for bad output formats. - body: >- - An attempt to use an invalid value for the global --output flag now renders a proper error message. - - type: bugfix - title: Remove lingering DNS config on macOS. - body: >- - Files lingering under /etc/resolver as a result of ungraceful shutdown of the root daemon on macOS, are - now removed when a new root daemon starts. - - version: 2.8.5 - date: "2022-11-2" - notes: - - type: security - title: CVE-2022-41716 - body: >- - Updated Golang to 1.19.3 to address CVE-2022-41716. - - version: 2.8.4 - date: "2022-11-2" - notes: - - type: bugfix - title: Release Process - body: >- - This release resulted in changes to our release process. - - version: 2.8.3 - date: "2022-10-27" - notes: - - type: feature - title: Ability to disable global intercepts. - body: >- - Global intercepts (a.k.a. TCP intercepts) can now be disabled by using the new Helm chart setting intercept.disableGlobal. - docs: https://github.com/telepresenceio/telepresence/issues/2140 - - type: feature - title: Configurable mutating webhook port - body: >- - The port used for the mutating webhook can be configured using the Helm chart setting - agentInjector.webhook.port. - docs: install/helm - - type: change - title: Mutating webhook port defaults to 443 - body: >- - The default port for the mutating webhook is now 443. It used to be 8443. - - type: change - title: Agent image configuration mandatory in air-gapped environments. - body: >- - The traffic-manager will no longer default to use the tel2 image for the traffic-agent when it is - unable to connect to Ambassador Cloud. Air-gapped environments must declare what image to use in the Helm chart. 
- - type: bugfix - title: Can now connect to non-helm installs - body: >- - telepresence connect now works as long as the traffic manager is installed, even if - it wasn't installed via >code>helm install - docs: https://github.com/telepresenceio/telepresence/issues/2824 - - type: bugfix - title: check-vpn crash fixed - body: >- - telepresence check-vpn no longer crashes when the daemons don't start properly. - - version: 2.8.2 - date: "2022-10-15" - notes: - - type: bugfix - title: Reinstate 2.8.0 - body: >- - There was an issue downloading the free enhanced client. This problem was fixed, 2.8.0 was reinstated - - version: 2.8.1 - date: "2022-10-14" - notes: - - type: bugfix - title: Rollback 2.8.0 - body: >- - Rollback 2.8.0 while we investigate an issue with ambassador cloud. - - version: 2.8.0 - date: "2022-10-14" - notes: - - type: feature - title: Improved DNS resolver - body: >- - The Telepresence DNS resolver is now capable of resolving queries of type A, AAAA, CNAME, - MX, NS, PTR, SRV, and TXT. - docs: reference/dns - - type: feature - title: New `client` structure in Helm chart - body: >- - A new client struct was added to the Helm chart. It contains a connectionTTL that controls - how long the traffic manager will retain a client connection without seeing any sign of life from the client. - docs: reference/cluster-config#Client-Configuration - - type: feature - title: Include and exclude suffixes configurable using the Helm chart. - body: >- - A dns element was added to the client struct in Helm chart. It contains an includeSuffixes and - an excludeSuffixes value that controls what type of names that the DNS resolver in the client will delegate to - the cluster. - docs: reference/cluster-config#DNS - - type: feature - title: Configurable traffic-manager API port - body: >- - The API port used by the traffic-manager is now configurable using the Helm chart value apiPort. - The default port is 8081. - docs: https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence - - type: feature - title: Envoy server and admin port configuration. - body: >- - An new agent struct was added to the Helm chart. It contains an `envoy` structure where the server and - admin port of the Envoy proxy running in the enhanced traffic-agent can be configured. - docs: reference/cluster-config#Envoy-Configuration - - type: change - title: Helm chart `dnsConfig` moved to `client.routing`. - body: >- - The Helm chart dnsConfig was deprecated but retained for backward compatibility. The fields alsoProxySubnets - and neverProxySubnets can now be found under routing in the client struct. - docs: reference/cluster-config#Routing - - type: change - title: Helm chart `agentInjector.agentImage` moved to `agent.image`. - body: >- - The Helm chart agentInjector.agentImage was moved to agent.image. The old value is deprecated but - retained for backward compatibility. - docs: reference/cluster-config#Image-Configuration - - type: change - title: Helm chart `agentInjector.appProtocolStrategy` moved to `agent.appProtocolStrategy`. - body: >- - The Helm chart agentInjector.appProtocolStrategy was moved to agent.appProtocolStrategy. The old - value is deprecated but retained for backward compatibility. - docs: reference/cluster-config#Application-Protocol-Selection - - type: change - title: Helm chart `dnsServiceName`, `dnsServiceNamespace`, and `dnsServiceIP` removed. 
- body: >- - The Helm chart dnsServiceName, dnsServiceNamespace, and dnsServiceIP has been removed, because - they are no longer needed. The TUN-device will use the traffic-manager pod-IP on platforms where it needs to - dedicate an IP for its local resolver. - - type: change - title: Quit daemons with `telepresence quit -s` - body: >- - The former options `-u` and `-r` for `telepresence quit` has been deprecated and replaced with one option `-s` which will - quit both the root daemon and the user daemon. - - type: bugfix - title: Environment variable interpolation in pods now works. - body: >- - Environment variable interpolation now works for all definitions that are copied from pod containers - into the injected traffic-agent container. - - type: bugfix - title: Early detection of namespace conflict - body: >- - An attempt to create simultaneous intercepts that span multiple namespace on the same workstation - is detected early and prohibited instead of resulting in failing DNS lookups later on. - - type: bugfix - title: Annoying log message removed - body: >- - Spurious and incorrect ""!! SRV xxx"" messages will no longer appear in the logs when the reason - is normal context cancellation. - - type: bugfix - title: Single name DNS resolution in Docker on Linux host - body: >- - Single label names now resolves correctly when using Telepresence in Docker on a Linux host - - type: bugfix - title: Misnomer `appPortStrategy` in Helm chart renamed to `appProtocolStrategy`. - body: >- - The Helm chart value appProtocolStrategy is now correctly named (used to be appPortStategy) - - version: 2.7.6 - date: "2022-09-16" - notes: - - type: feature - title: Helm chart resource entries for injected agents - body: >- - The resources for the traffic-agent container and the optional init container can be - specified in the Helm chart using the resources and initResource fields - of the agentInjector.agentImage - - type: feature - title: Cluster event propagation when injection fails - body: >- - When the traffic-manager fails to inject a traffic-agent, the cause for the failure is - detected by reading the cluster events, and propagated to the user. - - type: feature - title: FTP-client instead of sshfs for remote mounts - body: >- - Telepresence can now use an embedded FTP client and load an existing FUSE library instead of running - an external sshfs or sshfs-win binary. This feature is experimental in 2.7.x - and enabled by setting intercept.useFtp to true> in the config.yml. - - type: change - title: Upgrade of winfsp - body: >- - Telepresence on Windows upgraded winfsp from version 1.10 to 1.11 - - type: bugfix - title: Removal of invalid warning messages - body: >- - Running CLI commands on Apple M1 machines will no longer throw warnings about /proc/cpuinfo - and /proc/self/auxv. - - version: 2.7.5 - date: "2022-09-14" - notes: - - type: change - title: Rollback of release 2.7.4 - body: >- - This release is a rollback of the changes in 2.7.4, so essentially the same as 2.7.3 - - version: 2.7.4 - date: "2022-09-14" - notes: - - type: change - body: >- - This release was broken on some platforms. Use 2.7.6 instead. - - version: 2.7.3 - date: "2022-09-07" - notes: - - type: bugfix - title: PTY for CLI commands - body: >- - CLI commands that are executed by the user daemon now use a pseudo TTY. This enables - docker run -it to allocate a TTY and will also give other commands like bash read the - same behavior as when executed directly in a terminal. 
- docs: https://github.com/telepresenceio/telepresence/issues/2724 - - type: bugfix - title: Traffic Manager useless warning silenced - body: >- - The traffic-manager will no longer log numerous warnings saying Issuing a - systema request without ApiKey or InstallID may result in an error. - - type: bugfix - title: Traffic Manager useless error silenced - body: >- - The traffic-manager will no longer log an error saying Unable to derive subnets - from nodes when the podCIDRStrategy is auto and it chooses to instead derive the - subnets from the pod IPs. - - version: 2.7.2 - date: "2022-08-25" - notes: - - type: feature - title: Autocompletion scripts - body: >- - Autocompletion scripts can now be generated with telepresence completion SHELL where SHELL can be bash, zsh, fish or powershell. - - type: feature - title: Connectivity check timeout - body: >- - The timeout for the initial connectivity check that Telepresence performs - in order to determine if the cluster's subnets are proxied or not can now be configured - in the config.yml file using timeouts.connectivityCheck. The default timeout was - changed from 5 seconds to 500 milliseconds to speed up the actual connect. - docs: reference/config#timeouts - - type: change - title: gather-traces feedback - body: >- - The command telepresence gather-traces now prints out a message on success. - docs: troubleshooting#distributed-tracing - - type: change - title: upload-traces feedback - body: >- - The command telepresence upload-traces now prints out a message on success. - docs: troubleshooting#distributed-tracing - - type: change - title: gather-traces tracing - body: >- - The command telepresence gather-traces now traces itself and reports errors with trace gathering. - docs: troubleshooting#distributed-tracing - - type: change - title: CLI log level - body: >- - The cli.log log is now logged at the same level as the connector.log - docs: reference/config#log-levels - - type: bugfix - title: Telepresence --help fixed - body: >- - telepresence --help now works once more even if there's no user daemon running. - docs: https://github.com/telepresenceio/telepresence/issues/2735 - - type: bugfix - title: Stream cancellation when no process intercepts - body: >- - Streams created between the traffic-agent and the workstation are now properly closed - when no interceptor process has been started on the workstation. This fixes a potential problem where - a large number of attempts to connect to a non-existing interceptor would cause stream congestion - and an unresponsive intercept. - - type: bugfix - title: List command excludes the traffic-manager - body: >- - The telepresence list command no longer includes the traffic-manager deployment. - - version: 2.7.1 - date: "2022-08-10" - notes: - - type: change - title: Reinstate telepresence uninstall - body: >- - Reinstate telepresence uninstall with --everything depreciated - - type: change - title: Reduce telepresence helm uninstall - body: >- - telepresence helm uninstall will only uninstall the traffic-manager helm chart and no longer accepts the --everything, --agent, or --all-agents flags. - - type: bugfix - title: Auto-connect for telepresence intercpet - body: >- - telepresence intercept will attempt to connect to the traffic manager before creating an intercept. - - version: 2.7.0 - date: "2022-08-07" - notes: - - type: feature - title: Distributed Tracing - body: >- - The Telepresence components now collect OpenTelemetry traces. 
- Up to 10MB of trace data are available at any given time for collection from - components. telepresence gather-traces is a new command that will collect - all that data and place it into a gzip file, and telepresence upload-traces is - a new command that will push the gzipped data into an OTLP collector. - docs: troubleshooting#distributed-tracing - - type: feature - title: Helm install - body: >- - A new telepresence helm command was added to provide an easy way to install, upgrade, or uninstall the telepresence traffic-manager. - docs: install/manager - - type: feature - title: Ignore Volume Mounts - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-ignore-volume-mounts, that can be used to make the injector ignore specified volume mounts denoted by a comma-separated string. - - type: feature - title: telepresence pod-daemon - body: >- - The Docker image now contains a new program in addition to - the existing traffic-manager and traffic-agent: the pod-daemon. The - pod-daemon is a trimmed-down version of the user-daemon that is - designed to run as a sidecar in a Pod, enabling CI systems to create - preview deploys. - - type: feature - title: Prometheus support for traffic manager - body: >- - Added prometheus support to the traffic manager. - - type: change - title: No install on telepresence connect - body: >- - The traffic manager is no longer automatically installed into the cluster. Connecting or creating an intercept in a cluster without a traffic manager will return an error. - docs: install/manager - - type: change - title: Helm Uninstall - body: >- - The command telepresence uninstall has been moved to telepresence helm uninstall. - docs: install/manager - - type: bugfix - title: readOnlyRootFileSystem mounts work - body: >- - Add an emptyDir volume and volume mount under /tmp on the agent sidecar so it works with `readOnlyRootFileSystem: true` - docs: https://github.com/telepresenceio/telepresence/pull/2666 - - version: 2.6.8 - date: "2022-06-23" - notes: - - type: feature - title: Specify Your DNS - body: >- - The name and namespace for the DNS Service that the traffic-manager uses in DNS auto-detection can now be specified. - - type: feature - title: Specify a Fallback DNS - body: >- - Should the DNS auto-detection logic in the traffic-manager fail, users can now specify a fallback IP to use. - - type: feature - title: Intercept UDP Ports - body: >- - It is now possible to intercept UDP ports with Telepresence and also use --to-pod to forward UDP traffic from ports on localhost. - - type: change - title: Additional Helm Values - body: >- - The Helm chart will now add the nodeSelector, affinity and tolerations values to the traffic-manager's post-upgrade-hook and pre-delete-hook jobs. - - type: bugfix - title: Agent Injection Bugfix - body: >- - Telepresence no longer fails to inject the traffic agent into the pod generated for workloads that have no volumes and `automountServiceAccountToken: false`. - - version: 2.6.7 - date: "2022-06-22" - notes: - - type: bugfix - title: Persistant Sessions - body: >- - The Telepresence client will remember and reuse the traffic-manager session after a network failure or other reason that caused an unclean disconnect. - - type: bugfix - title: DNS Requests - body: >- - Telepresence will no longer forward DNS requests for "wpad" to the cluster. - - type: bugfix - title: Graceful Shutdown - body: >- - The traffic-agent will properly shut down if one of its goroutines errors. 
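A minimal sketch of what the Helm values described in the 2.6.8 "Specify Your DNS" and "Specify a Fallback DNS" notes above might look like, assuming the dnsServiceName, dnsServiceNamespace, and dnsServiceIP names that the 2.8.0 notes at the top of this file later removed (the service name, namespace, and IP below are placeholders, not values taken from these notes):

```yaml
# values.yaml (illustrative sketch, not copied from the chart documentation)
dnsServiceName: kube-dns         # DNS Service the traffic-manager should use during auto-detection
dnsServiceNamespace: kube-system
dnsServiceIP: 10.96.0.10         # fallback IP, used if auto-detection fails
```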
- - version: 2.6.6 - date: "2022-06-09" - notes: - - type: bugfix - title: Env Var `TELEPRESENCE_API_PORT` - body: >- - The propagation of the TELEPRESENCE_API_PORT environment variable now works correctly. - - type: bugfix - title: Double Printing `--output json` - body: >- - The --output json global flag no longer outputs multiple objects.
- - version: 2.6.5 - date: "2022-06-03" - notes: - - type: feature - title: Helm Option -- `reinvocationPolicy` - body: >- - The reinvocationPolicy of the traffic-agent injector webhook can now be configured using the Helm chart. - docs: install/helm - - type: feature - title: Helm Option -- Proxy Certificate - body: >- - The traffic manager now accepts a root CA for a proxy, allowing it to connect to Ambassador Cloud from behind an HTTPS proxy. This can be configured through the Helm chart. - docs: install/helm - - type: feature - title: Helm Option -- Agent Injection - body: >- - A policy that controls when the mutating webhook injects the traffic-agent was added, and can be configured in the Helm chart. - docs: install/helm - - type: change - title: Windows Tunnel Version Upgrade - body: >- - Telepresence on Windows upgraded wintun.dll from version 0.12 to version 0.14.1. - - type: change - title: Helm Version Upgrade - body: >- - Telepresence upgraded its embedded Helm from version 3.8.1 to 3.9. - - type: change - title: Kubernetes API Version Upgrade - body: >- - Telepresence upgraded its embedded Kubernetes API from version 0.23.4 to 0.24.1. - - type: feature - title: Flag `--watch` Added to `list` Command - body: >- - Added a --watch flag to telepresence list that can be used to watch interceptable workloads in a namespace. - - type: change - title: Deprecated `images.webhookAgentImage` - body: >- - The Telepresence configuration setting for `images.webhookAgentImage` is now deprecated. Use `images.agentImage` instead. - - type: bugfix - title: Default `reinvocationPolicy` Set to Never - body: >- - The reinvocationPolicy of the traffic-agent injector webhook now defaults to Never instead of IfNeeded so that LimitRanges on namespaces can inject a missing resources element into the injected traffic-agent container. - - type: bugfix - title: UDP - body: >- - UDP-based communication with services in the cluster now works as expected. - - type: bugfix - title: Telepresence `--help` - body: >- - The command help will only show Kubernetes flags on the commands that support them. - - type: change - title: Error Count - body: >- - Only the errors from the last session will be considered when counting the number of errors in the log after a command failure.
- - version: 2.6.4 - date: "2022-05-23" - notes: - - type: bugfix - title: Upgrade RBAC Permissions - body: >- - The traffic-manager RBAC grants permissions to update services, deployments, replicasets, and statefulsets. Those permissions are needed when the traffic-manager upgrades from versions < 2.6.0 and can be revoked after the upgrade.
- - version: 2.6.3 - date: "2022-05-20" - notes: - - type: bugfix - title: Relative Mount Paths - body: >- - The --mount intercept flag now handles relative mount points correctly on non-Windows platforms. Windows still requires the argument to be a drive letter followed by a colon. - - type: bugfix - title: Traffic Agent Config - body: >- - The traffic-agent's configuration now updates automatically when services are added, updated, or deleted.
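The 2.6.5 note above deprecates images.webhookAgentImage in favor of images.agentImage. A minimal config.yml sketch, assuming the documented images section and using a placeholder registry and tag:

```yaml
# config.yml (illustrative sketch; registry and tag are placeholders)
images:
  registry: docker.io/datawire   # assumed default registry
  agentImage: tel2:2.6.5         # replaces the deprecated images.webhookAgentImage
```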
- - type: bugfix - title: Container Injection for Numeric Ports - body: >- - Telepresence will now always inject an initContainer when the service's targetPort is numeric. - - type: bugfix - title: Matching Services - body: >- - Workloads that have several matching services pointing to the same target port are now handled correctly. - - type: bugfix - title: Unexpected Panic - body: >- - A potential race condition causing a panic when closing a DNS connection is now handled correctly. - - type: bugfix - title: Mount Volume Cleanup - body: >- - A container start would sometimes fail because an old directory remained in a mounted temp volume.
- - version: 2.6.2 - date: "2022-05-17" - notes: - - type: bugfix - title: Argo Injection - body: >- - Workloads controlled by other workloads, such as Argo Rollout, are now injected correctly. - - type: bugfix - title: Agent Port Mapping - body: >- - Multiple services pointing to the same container port no longer result in duplicated ports in an injected pod. - - type: bugfix - title: GRPC Max Message Size - body: >- - The telepresence list command no longer errors out with "grpc: received message larger than max" when listing namespaces with a large number of workloads.
- - version: 2.6.1 - date: "2022-05-16" - notes: - - type: bugfix - title: KUBECONFIG environment variable - body: >- - Telepresence will now handle multiple path entries in the KUBECONFIG environment variable correctly.
- - version: 2.6.0 - date: "2022-05-13" - notes: - - type: feature - title: Intercept multiple containers in a pod, and multiple ports per container - body: >- - Telepresence can now intercept multiple services and/or service-ports that connect to the same pod. - docs: new-in-2.6#intercept-multiple-containers-and-ports - - type: feature - title: The Traffic Agent sidecar is always injected by the Traffic Manager's mutating webhook - body: >- - The client will no longer modify deployments, replicasets, or statefulsets in order to - inject a Traffic Agent into an intercepted pod. Instead, all injection is now performed by a mutating webhook. As a result, - the client now needs fewer permissions in the cluster. - docs: install/upgrade#important-note-about-upgrading-to-2.6.0 - - type: change - title: Automatic upgrade of Traffic Agents - body: >- - When upgrading, all workloads with injected agents will have their agent "uninstalled" automatically. - The mutating webhook will then ensure that their pods will receive an updated Traffic Agent. - docs: new-in-2.6#no-more-workload-modifications - - type: change - title: No default image in the Helm chart - body: >- - The Helm chart no longer has a default set for the agentInjector.image.name, and unless it's set, the - traffic-manager will ask Ambassador Cloud for the preferred image. - docs: new-in-2.6#smarter-agent - - type: change - title: Upgrade to Helm version 3.8.1 - body: The Telepresence client now uses Helm version 3.8.1 when auto-installing the Traffic Manager. - - type: bugfix - title: Remote mounts will now function correctly with custom securityContext - body: >- - The bug causing permission problems when the Traffic Agent is in a Pod with a custom securityContext has been fixed. - - type: bugfix - title: Improved presentation of flags in CLI help - body: The help for commands that accept Kubernetes flags will now display those flags in a separate group.
- - type: bugfix - title: Better termination of process parented by intercept - body: >- - Occasionally an intercept will spawn a command using -- on the command line, often in another console. - When you use telepresence leave or telepresence quit while the intercept with the spawned command is still active, - Telepresence will now terminate that the command because it's considered to be parented by the intercept that is being removed. - - version: 2.5.8 - date: "2022-04-27" - notes: - - type: bugfix - title: Folder creation on `telepresence login` - body: >- - Fixed a bug where the telepresence config folder would not be created if the user ran telepresence login before other commands. - - version: 2.5.7 - date: "2022-04-25" - notes: - - type: change - title: RBAC requirements - body: >- - A namespaced traffic-manager will no longer require cluster wide RBAC. Only Roles and RoleBindings are now used. - - type: bugfix - title: Windows DNS - body: >- - The DNS recursion detector didn't work correctly on Windows, resulting in sporadic failures to resolve names that were resolved correctly at other times. - - type: bugfix - title: Session TTL and Reconnect - body: >- - A telepresence session will now last for 24 hours after the user's last connectivity. If a session expires, the connector will automatically try to reconnect. - - version: 2.5.6 - date: "2022-04-18" - notes: - - type: change - title: Less Watchers - body: >- - Telepresence agents watcher will now only watch namespaces that the user has accessed since the last connect. - - type: bugfix - title: More Efficient `gather-logs` - body: >- - The gather-logs command will no longer send any logs through gRPC. - - version: 2.5.5 - date: "2022-04-08" - notes: - - type: change - title: Traffic Manager Permissions - body: >- - The traffic-manager now requires permissions to read pods across namespaces even if installed with limited permissions - - type: bugfix - title: Linux DNS Cache - body: >- - The DNS resolver used on Linux with systemd-resolved now flushes the cache when the search path changes. - - type: bugfix - title: Automatic Connect Sync - body: >- - The telepresence list command will produce a correct listing even when not preceded by a telepresence connect. - - type: bugfix - title: Disconnect Reconnect Stability - body: >- - The root daemon will no longer get into a bad state when a disconnect is rapidly followed by a new connect. - - type: bugfix - title: Limit Watched Namespaces - body: >- - The client will now only watch agents from accessible namespaces, and is also constrained to namespaces explicitly mapped using the connect command's --mapped-namespaces flag. - - type: bugfix - title: Limit Namespaces used in `gather-logs` - body: >- - The gather-logs command will only gather traffic-agent logs from accessible namespaces, and is also constrained to namespaces explicitly mapped using the connect command's --mapped-namespaces flag. - - version: 2.5.4 - date: "2022-03-29" - notes: - - type: bugfix - title: Linux DNS Concurrency - body: >- - The DNS fallback resolver on Linux now correctly handles concurrent requests without timing them out - - type: bugfix - title: Non-Functional Flag - body: >- - The ingress-l5 flag will no longer be forcefully set to equal the --ingress-host flag - - type: bugfix - title: Automatically Remove Failed Intercepts - body: >- - Intercepts that fail to create are now consistently removed to prevent non-working dangling intercepts from sticking around. 
- - type: bugfix - title: Agent UID - body: >- - The agent container is no longer sensitive to a random UID or a UID imposed by a SecurityContext. - - type: bugfix - title: Gather-Logs Output Filepath - body: >- - Removed a bad concatenation that corrupted the output path of telepresence gather-logs. - - type: change - title: Remove Unnecessary Error Advice - body: >- - The advice to "see logs for details" is no longer printed when the argument count is incorrect in a CLI command. - - type: bugfix - title: Garbage Collection - body: >- - Client and agent sessions no longer leave dangling waiters in the traffic-manager when they depart. - - type: bugfix - title: Limit Gathered Logs - body: >- - The client's gather-logs command and agent watcher will now respect the configured grpc.maxReceiveSize. - - type: change - title: In-Cluster Checks - body: >- - The TUN device will no longer route pod or service subnets if it is running on a machine that's already connected to the cluster. - - type: change - title: Expanded Status Command - body: >- - The status command includes the install id, user id, account id, and user email in its result, and can print output as JSON. - - type: change - title: List Command Shows All Intercepts - body: >- - The list command, when used with the --intercepts flag, will list the user's intercepts from all namespaces.
- - version: 2.5.3 - date: "2022-02-25" - notes: - - type: bugfix - title: TCP Connectivity - body: >- - Fixed a bug in the TCP stack that caused timeouts after repeated connects to the same address. - - type: feature - title: Linux Binaries - body: >- - Client-side binaries for the arm64 architecture are now available for Linux.
- - version: 2.5.2 - date: "2022-02-23" - notes: - - type: bugfix - title: DNS server bugfix - body: >- - Fixed a bug where Telepresence would use the last server in resolv.conf.
- - version: 2.5.1 - date: "2022-02-19" - notes: - - type: bugfix - title: Fix GKE auth issue - body: >- - Fixed a bug where using a GKE cluster would error with: No Auth Provider found for name "gcp".
- - version: 2.5.0 - date: "2022-02-18" - notes: - - type: feature - title: Intercept metadata - body: >- - The flag --http-meta can be used to declare metadata key-value pairs that will be returned by the Telepresence REST - API endpoint /intercept-info. - docs: reference/restapi#intercept-info - - type: change - title: Client RBAC watch - body: >- - The verb "watch" was added to the set of required verbs when accessing services and workloads for the client RBAC - ClusterRole. - docs: reference/rbac - - type: change - title: Dropped backward compatibility with versions <=2.4.4 - body: >- - Telepresence is no longer backward compatible with versions 2.4.4 or older because the deprecated multiplexing tunnel - functionality was removed. - - type: change - title: No global networking flags - body: >- - The global networking flags are no longer used, and using them will render a deprecation warning unless they are supported by the - command. The subcommands that support networking flags are connect, current-cluster-id, - and genyaml. - - type: bugfix - title: Output of status command - body: >- - The also-proxy and never-proxy subnets are now displayed correctly when using the - telepresence status command. - - type: bugfix - title: SETENV sudo privilege no longer needed - body: >- - Telepresence no longer requires SETENV privileges when starting the root daemon.
- - type: bugfix - title: Network device names containing dash - body: >- - Telepresence will now parse device names containing dashes correctly when determining routes that it should never block. - - type: bugfix - title: Linux uses cluster.local as domain instead of search - body: >- - The cluster domain (typically "cluster.local") is no longer added to the DNS search on Linux using - systemd-resolved. Instead, it is added as a domain so that names ending with it are routed - to the DNS server.
- - version: 2.4.11 - date: "2022-02-10" - notes: - - type: change - title: Add additional logging to troubleshoot intermittent issues with intercepts - body: >- - We've noticed some issues with intercepts in v2.4.10, so we are releasing a version - with enhanced logging to help debug and fix the issue.
- - version: 2.4.10 - date: "2022-01-13" - notes: - - type: feature - title: New --http-plaintext option - body: >- - The flag --http-plaintext can be used to ensure that an intercept uses plaintext HTTP or gRPC when - communicating with the workstation process. - docs: reference/intercepts/#tls - - type: feature - title: Configure the default intercept port - body: >- - The port used by default in the telepresence intercept command (8080) can now be changed by setting - the intercept.defaultPort in the config.yml file. - docs: reference/config/#intercept - - type: change - title: Telepresence CI now uses GitHub Actions - body: >- - Telepresence now uses GitHub Actions for doing unit and integration testing. It is - now easier for contributors to run tests on PRs since maintainers can add an - "ok to test" label to PRs (including from forks) to run integration tests. - docs: https://github.com/telepresenceio/telepresence/actions - image: telepresence-2.4.10-actions.png - - type: bugfix - title: Check conditions before asking questions - body: >- - Users will not be asked to log in or add ingress information when creating an intercept until a check has been - made that the intercept is possible. - docs: reference/intercepts/ - - type: bugfix - title: Fix invalid log statement - body: >- - Telepresence will no longer log invalid "unhandled connection control message: code DIAL_OK" errors. - - type: bugfix - title: Log errors from sshfs/sftp - body: >- - Output to stderr from the traffic-agent's sftp and the client's sshfs processes - is now properly logged as errors. - - type: bugfix - title: Don't use Windows path separators in workload pod template - body: >- - The auto-installer will no longer emit backslash separators for the /tel-app-mounts paths in the - traffic-agent container spec when running on Windows.
- - version: 2.4.9 - date: "2021-12-09" - notes: - - type: bugfix - title: Helm upgrade nil pointer error - body: >- - A helm upgrade using the --reuse-values flag no longer fails on a "nil pointer" error caused by a nil - telepresenceAPI value. - docs: install/helm#upgrading-the-traffic-manager
- - version: 2.4.8 - date: "2021-12-03" - notes: - - type: feature - title: VPN diagnostics tool - body: >- - There is a new subcommand, test-vpn, that can be used to diagnose connectivity issues with a VPN. - See the VPN docs for more information on how to use it.
- docs: reference/vpn - image: telepresence-2.4.8-vpn.png - - - type: feature - title: RESTful API service - body: >- - A RESTful service was added to Telepresence, both locally to the client and to the traffic-agent to - help determine if messages with a set of headers should be consumed or not from a message queue where the - intercept headers are added to the messages. - docs: reference/restapi - image: telepresence-2.4.8-health-check.png - - - type: change - title: TELEPRESENCE_LOGIN_CLIENT_ID env variable no longer used - body: >- - You could previously configure this value, but there was no reason to change it, so the value - was removed. - - - type: bugfix - title: Tunneled network connections behave more like ordinary TCP connections. - body: >- - When using Telepresence with an external cloud provider for extensions, those tunneled - connections now behave more like TCP connections, especially when it comes to timeouts. - We've also added increased testing around these types of connections. - - version: 2.4.7 - date: "2021-11-24" - notes: - - type: feature - title: Injector service-name annotation - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-service-name, that can be used to set the name of the service to be intercepted. - This will help disambiguate which service to intercept for when a workload is exposed by multiple services, such as can happen with Argo Rollouts - docs: reference/cluster-config#service-name-annotation - - type: feature - title: Skip the Ingress Dialogue - body: >- - You can now skip the ingress dialogue by setting the ingress parameters in the corresponding flags. - docs: reference/intercepts#skipping-the-ingress-dialogue - - type: feature - title: Never proxy subnets - body: >- - The kubeconfig extensions now support a never-proxy argument, - analogous to also-proxy, that defines a set of subnets that - will never be proxied via telepresence. - docs: reference/config#neverproxy - - type: change - title: Daemon versions check - body: >- - Telepresence now checks the versions of the client and the daemons and asks the user to quit and restart if they don't match. - - type: change - title: No explicit DNS flushes - body: >- - Telepresence DNS now uses a very short TTL instead of explicitly flushing DNS by killing the mDNSResponder or doing resolvectl flush-caches - docs: reference/routing#dns-caching - - type: bugfix - title: Legacy flags now work with global flags - body: >- - Legacy flags such as --swap-deployment can now be used together with global flags. - - type: bugfix - title: Outbound connection closing - body: >- - Outbound connections are now properly closed when the peer closes. - - type: bugfix - title: Prevent DNS recursion - body: >- - The DNS-resolver will trap recursive resolution attempts (may happen when the cluster runs in a docker-container on the client). - docs: reference/routing#dns-recursion - - type: bugfix - title: Prevent network recursion - body: >- - The TUN-device will trap failed connection attempts that results in recursive calls back into the TUN-device (may happen when the - cluster runs in a docker-container on the client). - docs: reference/routing#connect-recursion - - type: bugfix - title: Traffic Manager deadlock fix - body: >- - The Traffic Manager no longer runs a risk of entering a deadlock when a new Traffic agent arrives. 
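The 2.4.7 "Never proxy subnets" note above adds a never-proxy argument to the kubeconfig extensions, analogous to also-proxy. A hedged sketch of where those entries sit in a kubeconfig, with a placeholder cluster name, server address, and subnets:

```yaml
# kubeconfig fragment (illustrative sketch)
clusters:
- name: example-cluster
  cluster:
    server: https://example.invalid
    extensions:
    - name: telepresence.io
      extension:
        also-proxy:      # extra subnets routed via the cluster
        - 10.1.2.0/24
        never-proxy:     # subnets that Telepresence must never proxy
        - 192.168.0.0/16
```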
- - type: bugfix - title: webhookRegistry config propagation - body: >- - The configured webhookRegistry is now propagated to the webhook installer even if no webhookAgentImage has been set. - docs: reference/config#images - - type: bugfix - title: Login refreshes expired tokens - body: >- - When a user's token has expired, telepresence login - will prompt the user to log in again to get a new token. Previously, - the user had to telepresence quit and telepresence logout - to get a new token. - docs: https://github.com/telepresenceio/telepresence/issues/2062 - - version: 2.4.6 - date: "2021-11-02" - notes: - - type: feature - title: Manually injecting Traffic Agent - body: >- - Telepresence now supports manually injecting the traffic-agent YAML into workload manifests. - Use the genyaml command to create the sidecar YAML, then add the telepresence.getambassador.io/manually-injected: "true" annotation to your pods to allow Telepresence to intercept them. - docs: reference/intercepts/manual-agent - - - type: feature - title: Telepresence CLI released for Apple silicon - body: >- - Telepresence is now built and released for Apple silicon. - docs: install/?os=macos - - - type: change - title: Telepresence help text now links to telepresence.io - body: >- - We now include a link to our documentation when you run telepresence --help. This will make it easier - for users to find this page whether they acquire Telepresence through Brew or some other mechanism. - image: telepresence-2.4.6-help-text.png - - - type: bugfix - title: Fixed bug when API server is inside CIDR range of pods/services - body: >- - If the API server for your kubernetes cluster had an IP that fell within the - subnet generated from pods/services in a kubernetes cluster, it would proxy traffic - to the API server which would result in hanging or a failed connection. We now ensure - that the API server is explicitly not proxied. - - version: 2.4.5 - date: "2021-10-15" - notes: - - type: feature - title: Get pod yaml with gather-logs command - body: >- - Adding the flag --get-pod-yaml to your request will get the - pod yaml manifest for all kubernetes components you are getting logs for - ( traffic-manager and/or pods containing a - traffic-agent container). This flag is set to false - by default. - docs: reference/client - image: telepresence-2.4.5-pod-yaml.png - - - type: feature - title: Anonymize pod name + namespace when using gather-logs command - body: >- - Adding the flag --anonymize to your command will - anonymize your pod names + namespaces in the output file. We replace the - sensitive names with simple names (e.g. pod-1, namespace-2) to maintain - relationships between the objects without exposing the real names of your - objects. This flag is set to false by default. - docs: reference/client - image: telepresence-2.4.5-logs-anonymize.png - - - type: feature - title: Support for intercepting headless services - body: >- - Intercepting headless services is now officially supported. You can request a - headless service on whatever port it exposes and get a response from the - intercept. This leverages the same approach as intercepting numeric ports when - using the mutating webhook injector, mainly requires the initContainer - to have NET_ADMIN capabilities. 
- docs: reference/intercepts/#intercepting-headless-services - - - type: change - title: Use one tunnel per connection instead of multiplexing into one tunnel - body: >- - We have changed Telepresence so that it uses one tunnel per connection instead - of multiplexing all connections into one tunnel. This will provide substantial - performance improvements. Clients will still be backwards compatible with older - managers that only support multiplexing. - - - type: bugfix - title: Added checks for Telepresence kubernetes compatibility - body: >- - Telepresence currently works with Kubernetes server versions 1.17.0 - and higher. We have added logs in the connector and traffic-manager - to let users know when they are using Telepresence with a cluster it doesn't support. - docs: reference/cluster-config - - - type: bugfix - title: Traffic Agent security context is now only added when necessary - body: >- - When creating an intercept, Telepresence will now only set the traffic agent's GID - when strictly necessary (i.e. when using headless services or numeric ports). This mitigates - an issue on openshift clusters where the traffic agent can fail to be created due to - openshift's security policies banning arbitrary GIDs. - - - version: 2.4.4 - date: "2021-09-27" - notes: - - type: feature - title: Numeric ports in agent injector - body: >- - The agent injector now supports injecting Traffic Agents into pods that have unnamed ports. - docs: reference/cluster-config/#note-on-numeric-ports - - - type: feature - title: New subcommand to gather logs and export into zip file - body: >- - Telepresence has logs for various components (the - traffic-manager, traffic-agents, the root and - user daemons), which are integral for understanding and debugging - Telepresence behavior. We have added the telepresence - gather-logs command to make it simple to compile logs for - all Telepresence components and export them in a zip file that can - be shared to others and/or included in a github issue. For more - information on usage, run telepresence gather-logs --help - . - docs: reference/client - image: telepresence-2.4.4-gather-logs.png - - - type: feature - title: Pod CIDR strategy is configurable in Helm chart - body: >- - Telepresence now enables you to directly configure how to get - pod CIDRs when deploying Telepresence with the Helm chart. - The default behavior remains the same. We've also introduced - the ability to explicitly set what the pod CIDRs should be. - docs: install/helm - - - type: bugfix - title: Compute pod CIDRs more efficiently - body: >- - When computing subnets using the pod CIDRs, the traffic-manager - now uses less CPU cycles. - docs: reference/routing/#subnets - - - type: bugfix - title: Prevent busy loop in traffic-manager - body: >- - In some circumstances, the traffic-manager's CPU - would max out and get pinned at its limit. This required a - shutdown or pod restart to fix. We've added some fixes - to prevent the traffic-manager from getting into this state. - - - type: bugfix - title: Added a fixed buffer size to TUN-device - body: >- - The TUN-device now has a max buffer size of 64K. This prevents the - buffer from growing limitlessly until it receies a PSH, which could - be a blocking operation when receiving lots of TCP-packets. - docs: reference/tun-device - - - type: bugfix - title: Fix hanging user daemon - body: >- - When Telepresence encountered an issue connecting to the cluster or - the root daemon, it could hang indefintely. 
It now will error correctly - when it encounters that situation. - - - type: bugfix - title: Improved proprietary agent connectivity - body: >- - To determine whether the environment cluster is air-gapped, the - proprietary agent attempts to connect to the cloud during startup. - To deal with a possible initial failure, the agent backs off - and retries the connection with an increasing backoff duration. - - - type: bugfix - title: Telepresence correctly reports intercept port conflict - body: >- - When creating a second intercept targetting the same local port, - it now gives the user an informative error message. Additionally, - it tells them which intercept is currently using that port to make - it easier to remedy. - - - version: 2.4.3 - date: "2021-09-15" - notes: - - type: feature - title: Environment variable TELEPRESENCE_INTERCEPT_ID available in interceptor's environment - body: >- - When you perform an intercept, we now include a TELEPRESENCE_INTERCEPT_ID environment - variable in the environment. - docs: reference/environment/#telepresence-environment-variables - - - type: bugfix - title: Improved daemon stability - body: >- - Fixed a timing bug that sometimes caused a "daemon did not start" failure. - - - type: bugfix - title: Complete logs for Windows - body: >- - Crash stack traces and other errors were incorrectly not written to log files. This has - been fixed so logs for Windows should be at parity with the ones in MacOS and Linux. - - - type: bugfix - title: Log rotation fix for Linux kernel 4.11+ - body: >- - On Linux kernel 4.11 and above, the log file rotation now properly reads the - birth-time of the log file. Older kernels continue to use the old behavior - of using the change-time in place of the birth-time. - - - type: bugfix - title: Improved error messaging - body: >- - When Telepresence encounters an error, it tells the user where they should look for - logs related to the error. We have refined this so that it only tells users to look - for errors in the daemon logs for issues that are logged there. - - - type: bugfix - title: Stop resolving localhost - body: >- - When using the overriding DNS resolver, it will no longer apply search paths when - resolving localhost, since that should be resolved on the user's machine - instead of the cluster. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Variable cluster domain - body: >- - Previously, the cluster domain was hardcoded to cluster.local. While this - is true for many kubernetes clusters, it is not for all of them. Now this value is - retrieved from the traffic-manager. - - - type: bugfix - title: Improved cleanup of traffic-agents - body: >- - Telepresence now uninstalls traffic-agents installed via mutating webhook - when using telepresence uninstall --everything. - - - type: bugfix - title: More large file transfer fixes - body: >- - Downloading large files during an intercept will no longer cause timeouts and hanging - traffic-agents. - - - type: bugfix - title: Setting --mount to false when intercepting works as expected - body: >- - When using --mount=false while performing an intercept, the file system - was still mounted. This has been remedied so the intercept behavior respects the - flag. - docs: reference/volume - - - type: bugfix - title: Traffic-manager establishes outbound connections in parallel - body: >- - Previously, the traffic-manager established outbound connections - sequentially. 
This resulted in slow (and failing) Dial calls would - block all outbound traffic from the workstation (for up to 30 seconds). We now - establish these connections in parallel so that won't occur. - docs: reference/routing/#outbound - - - type: bugfix - title: Status command reports correct DNS settings - body: >- - Telepresence status now correctly reports DNS settings for all operating - systems, instead of Local IP:nil, Remote IP:nil when they don't exist. - - - version: 2.4.2 - date: "2021-09-01" - notes: - - type: feature - title: New subcommand to temporarily change log-level - body: >- - We have added a new telepresence loglevel subcommand that enables users - to temporarily change the log-level for the local demons, the traffic-manager and - the traffic-agents. While the logLevels settings from the config will - still be used by default, this can be helpful if you are currently experiencing an issue and - want to have higher fidelity logs, without doing a telepresence quit and - telepresence connect. You can use telepresence loglevel --help to get - more information on options for the command. - docs: reference/config - - - type: change - title: All components have info as the default log-level - body: >- - We've now set the default for all components of Telepresence (traffic-agent, - traffic-manager, local daemons) to use info as the default log-level. - - - type: bugfix - title: Updating RBAC in helm chart to fix cluster-id regression - body: >- - In 2.4.1, we enabled the traffic-manager to get the cluster ID by getting the UID - of the default namespace. The helm chart was not updated to give the traffic-manager - those permissions, which has since been fixed. This impacted users who use licensed features of - the Telepresence extension in an air-gapped environment. - docs: reference/cluster-config/#air-gapped-cluster - - - type: bugfix - title: Timeouts for Helm actions are now respected - body: >- - The user-defined timeout for Helm actions wasn't always respected, causing the daemon to hang - indefinitely when failing to install the traffic-manager. - docs: reference/config#timeouts - - - version: 2.4.1 - date: "2021-08-30" - notes: - - type: feature - title: External cloud variables are now configurable - body: >- - We now support configuring the host and port for the cloud in your config.yml. These - are used when logging in to utilize features provided by an extension, and are also passed - along as environment variables when installing the traffic-manager. Additionally, we - now run our testsuite with these variables set to localhost to continue to ensure Telepresence - is fully fuctional without depeneding on an external service. The SYSTEMA_HOST and SYSTEMA_PORT - environment variables are no longer used. - image: telepresence-2.4.1-systema-vars.png - docs: reference/config/#cloud - - - type: feature - title: Helm chart can now regenerate certificate used for mutating webhook on-demand. - body: >- - You can now set agentInjector.certificate.regenerate when deploying Telepresence - with the Helm chart to automatically regenerate the certificate used by the agent injector webhook. - docs: install/helm - - - type: change - title: Traffic Manager installed via helm - body: >- - The traffic-manager is now installed via an embedded version of the Helm chart when telepresence connect is first performed on a cluster. - This change is transparent to the user. - A new configuration flag, timeouts.helm sets the timeouts for all helm operations performed by the Telepresence binary. 
- docs: reference/config#timeouts - - - type: change - title: traffic-manager gets cluster ID itself instead of via environment variable - body: >- - The traffic-manager used to get the cluster ID as an environment variable when running - telepresence connnect or via adding the value in the helm chart. This was - clunky so now the traffic-manager gets the value itself as long as it has permissions - to "get" and "list" namespaces (this has been updated in the helm chart). - docs: install/helm - - - type: bugfix - title: Telepresence now mounts all directories from /var/run/secrets - body: >- - In the past, we only mounted secret directories in /var/run/secrets/kubernetes.io. - We now mount *all* directories in /var/run/secrets, which, for example, includes - directories like eks.amazonaws.com used for IRSA tokens. - docs: reference/volume - - - type: bugfix - title: Max gRPC receive size correctly propagates to all grpc servers - body: >- - This fixes a bug where the max gRPC receive size was only propagated to some of the - grpc servers, causing failures when the message size was over the default. - docs: reference/config/#grpc - - - type: bugfix - title: Updated our Homebrew packaging to run manually - body: >- - We made some updates to our script that packages Telepresence for Homebrew so that it - can be run manually. This will enable maintainers of Telepresence to run the script manually - should we ever need to rollback a release and have latest point to an older verison. - docs: install/ - - - type: bugfix - title: Telepresence uses namespace from kubeconfig context on each call - body: >- - In the past, Telepresence would use whatever namespace was specified in the kubeconfig's current-context - for the entirety of the time a user was connected to Telepresence. This would lead to confusing behavior - when a user changed the context in their kubeconfig and expected Telepresence to acknowledge that change. - Telepresence now will do that and use the namespace designated by the context on each call. - - - type: bugfix - title: Idle outbound TCP connections timeout increased to 7200 seconds - body: >- - Some users were noticing that their intercepts would start failing after 60 seconds. - This was because the keep idle outbound TCP connections were set to 60 seconds, which we have - now bumped to 7200 seconds to match Linux's tcp_keepalive_time default. - - - type: bugfix - title: Telepresence will automatically remove a socket upon ungraceful termination - body: >- - When a Telepresence process terminates ungracefully, it would inform users that "this usually means - that the process has terminated ungracefully" and implied that they should remove the socket. We've - now made it so Telepresence will automatically attempt to remove the socket upon ungraceful termination. - - - type: bugfix - title: Fixed user daemon deadlock - body: >- - Remedied a situation where the user daemon could hang when a user was logged in. - - - type: bugfix - title: Fixed agentImage config setting - body: >- - The config setting images.agentImages is no longer required to contain the repository, and it - will use the value at images.repository. - docs: reference/config/#images - - - version: 2.4.0 - date: "2021-08-04" - notes: - - type: feature - title: Windows Client Developer Preview - body: >- - There is now a native Windows client for Telepresence that is being released as a Developer Preview. - All the same features supported by the MacOS and Linux client are available on Windows. 
- image: telepresence-2.4.0-windows.png - docs: install - - - type: feature - title: CLI raises helpful messages from Ambassador Cloud - body: >- - Telepresence can now receive messages from Ambassador Cloud and raise - them to the user when they perform certain commands. This enables us - to send you messages that may enhance your Telepresence experience when - using certain commands. Frequency of messages can be configured in your - config.yml. - image: telepresence-2.4.0-cloud-messages.png - docs: reference/config#cloud - - - type: bugfix - title: Improved stability of systemd-resolved-based DNS - body: >- - When initializing the systemd-resolved-based DNS, the routing domain - is set to improve stability in non-standard configurations. This also enables the - overriding resolver to do a proper take over once the DNS service ends. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Fixed an edge case when intercepting a container with multiple ports - body: >- - When specifying a port of a container to intercept, if there was a container in the - pod without ports, it was automatically selected. This has been fixed so we'll only - choose the container with "no ports" if there's no container that explicitly matches - the port used in your intercept. - docs: reference/intercepts/#creating-an-intercept-when-a-service-has-multiple-ports - - - type: bugfix - title: $(NAME) references in agent's environments are now interpolated correctly. - body: >- - If you had an environment variable $(NAME) in your workload that referenced another, intercepts - would not correctly interpolate $(NAME). This has been fixed and works automatically. - - - type: bugfix - title: Telepresence no longer prints INFO message when there is no config.yml - body: >- - Fixed a regression that printed an INFO message to the terminal when there wasn't a - config.yml present. The config is optional, so this message has been - removed. - docs: reference/config - - - type: bugfix - title: Telepresence no longer panics when using --http-match - body: >- - Fixed a bug where Telepresence would panic if the value passed to --http-match - didn't contain an equal sign, which has been fixed. The correct syntax is in the --help - string and looks like --http-match=HTTP2_HEADER=REGEX - docs: reference/intercepts/#intercept-behavior-when-logged-in-to-ambassador-cloud - - - type: bugfix - title: Improved subnet updates - body: >- - The traffic-manager used to update subnets whenever the Nodes or Pods changed, even if - the underlying subnet hadn't changed, which created a lot of unnecessary traffic between the - client and the traffic-manager. This has been fixed so we only send updates when the subnets - themselves actually change. - docs: reference/routing/#subnets - - - version: 2.3.7 - date: "2021-07-23" - notes: - - type: feature - title: Also-proxy in telepresence status - body: >- - An also-proxy entry in the Kubernetes cluster config will - show up in the output of the telepresence status command. - docs: reference/config - - - type: feature - title: Non-interactive telepresence login - body: >- - telepresence login now has an - --apikey=KEY flag that allows for - non-interactive logins. This is useful for headless - environments where launching a web-browser is impossible, - such as cloud shells, Docker containers, or CI. - image: telepresence-2.3.7-newkey.png - docs: reference/client/login/ - - - type: bugfix - title: Mutating webhook injector correctly hides named ports for probes. 
- body: >- - The mutating webhook injector has been fixed to correctly rename named ports for liveness and readiness probes - docs: reference/cluster-config - - - type: bugfix - title: telepresence current-cluster-id crash fixed - body: >- - Fixed a regression introduced in 2.3.5 that caused telepresence current-cluster-id - to crash. - docs: reference/cluster-config - - - type: bugfix - title: Better UX around intercepts with no local process running - body: >- - Requests would hang indefinitely when initiating an intercept before you - had a local process running. This has been fixed and will result in an - Empty reply from server until you start a local process. - docs: reference/intercepts - - - type: bugfix - title: API keys no longer show as "no description" - body: >- - New API keys generated internally for communication with - Ambassador Cloud no longer show up as "no description" in - the Ambassador Cloud web UI. Existing API keys generated by - older versions of Telepresence will still show up this way. - image: telepresence-2.3.7-keydesc.png - - - type: bugfix - title: Fix corruption of user-info.json - body: >- - Fixed a race condition that logging in and logging out - rapidly could cause memory corruption or corruption of the - user-info.json cache file used when - authenticating with Ambassador Cloud. - - - type: bugfix - title: Improved DNS resolver for systemd-resolved - body: - Telepresence's systemd-resolved-based DNS resolver is now more - stable and in case it fails to initialize, the overriding resolver - will no longer cause general DNS lookup failures when telepresence defaults to - using it. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Faster telepresence list command - body: - The performance of telepresence list has been increased - significantly by reducing the number of calls the command makes to the cluster. - docs: reference/client - - - version: 2.3.6 - date: "2021-07-20" - notes: - - type: bugfix - title: Fix subnet discovery - body: >- - Fixed a regression introduced in 2.3.5 where the Traffic - Manager's RoleBinding did not correctly appoint - the traffic-manager Role, causing - subnet discovery to not be able to work correctly. - docs: reference/rbac/ - - - type: bugfix - title: Fix root-user configuration loading - body: >- - Fixed a regression introduced in 2.3.5 where the root daemon - did not correctly read the configuration file; ignoring the - user's configured log levels and timeouts. - docs: reference/config/ - - - type: bugfix - title: Fix a user daemon crash - body: >- - Fixed an issue that could cause the user daemon to crash - during shutdown, as during shutdown it unconditionally - attempted to close a channel even though the channel might - already be closed. - - - version: 2.3.5 - date: "2021-07-15" - notes: - - type: feature - title: traffic-manager in multiple namespaces - body: >- - We now support installing multiple traffic managers in the same cluster. - This will allow operators to install deployments of telepresence that are - limited to certain namespaces. - image: ./telepresence-2.3.5-traffic-manager-namespaces.png - docs: install/helm - - type: feature - title: No more dependence on kubectl - body: >- - Telepresence no longer depends on having an external - kubectl binary, which might not be present for - OpenShift users (who have oc instead of - kubectl). 
- - type: feature - title: Max gRPC receive size now configurable - body: >- - The default max size of messages received through gRPC (4 MB) is sometimes insufficient. It can now be configured. - image: ./telepresence-2.3.5-grpc-max-receive-size.png - docs: reference/config/#grpc - - type: feature - title: CLI can be used in air-gapped environments - body: >- - While Telepresence will auto-detect if your cluster is in an air-gapped environment, - we've added an option users can add to their config.yml to ensure the cli acts like it - is in an air-gapped environment. Air-gapped environments require a manually installed - licence. - docs: reference/cluster-config/#air-gapped-cluster - image: ./telepresence-2.3.5-skipLogin.png - - version: 2.3.4 - date: "2021-07-09" - notes: - - type: bugfix - title: Improved IP log statements - body: >- - Some log statements were printing incorrect characters, when they should have been IP addresses. - This has been resolved to include more accurate and useful logging. - docs: reference/config/#log-levels - image: ./telepresence-2.3.4-ip-error.png - - type: bugfix - title: Improved messaging when multiple services match a workload - body: >- - If multiple services matched a workload when performing an intercept, Telepresence would crash. - It now gives the correct error message, instructing the user on how to specify which - service the intercept should use. - image: ./telepresence-2.3.4-improved-error.png - docs: reference/intercepts - - type: bugfix - title: Traffic-manger creates services in its own namespace to determine subnet - body: >- - Telepresence will now determine the service subnet by creating a dummy-service in its own - namespace, instead of the default namespace, which was causing RBAC permissions issues in - some clusters. - docs: reference/routing/#subnets - - type: bugfix - title: Telepresence connect respects pre-existing clusterrole - body: >- - When Telepresence connects, if the traffic-manager's desired clusterrole already exists in the - cluster, Telepresence will no longer try to update the clusterrole. - docs: reference/rbac - - type: bugfix - title: Helm Chart fixed for clientRbac.namespaced - body: >- - The Telepresence Helm chart no longer fails when installing with --set clientRbac.namespaced=true. - docs: install/helm - - version: 2.3.3 - date: "2021-07-07" - notes: - - type: feature - title: Traffic Manager Helm Chart - body: >- - Telepresence now supports installing the Traffic Manager via Helm. - This will make it easy for operators to install and configure the - server-side components of Telepresence separately from the CLI (which - in turn allows for better separation of permissions). - image: ./telepresence-2.3.3-helm.png - docs: install/helm/ - - type: feature - title: Traffic-manager in custom namespace - body: >- - As the traffic-manager can now be installed in any - namespace via Helm, Telepresence can now be configured to look for the - Traffic Manager in a namespace other than ambassador. - This can be configured on a per-cluster basis. - image: ./telepresence-2.3.3-namespace-config.png - docs: reference/config - - type: feature - title: Intercept --to-pod - body: >- - telepresence intercept now supports a - --to-pod flag that can be used to port-forward sidecars' - ports from an intercepted pod. 
- image: ./telepresence-2.3.3-to-pod.png - docs: reference/intercepts - - type: change - title: Change in migration from edgectl - body: >- - Telepresence no longer automatically shuts down the old - api_version=1 edgectl daemon. If migrating - from such an old version of edgectl you must now manually - shut down the edgectl daemon before running Telepresence. - This was already the case when migrating from the newer - api_version=2 edgectl. - - type: bugfix - title: Fixed error during shutdown - body: >- - The root daemon no longer terminates when the user daemon disconnects - from its gRPC streams, and instead waits to be terminated by the CLI. - This could cause problems with things not being cleaned up correctly. - - type: bugfix - title: Intercepts will survive deletion of intercepted pod - body: >- - An intercept will survive deletion of the intercepted pod provided - that another pod is created (or already exists) that can take over. - - version: 2.3.2 - date: "2021-06-18" - notes: - # Headliners - - type: feature - title: Service Port Annotation - body: >- - The mutator webhook for injecting traffic-agents now - recognizes a - telepresence.getambassador.io/inject-service-port - annotation to specify which port to intercept; bringing the - functionality of the --port flag to users who - use the mutator webook in order to control Telepresence via - GitOps. - image: ./telepresence-2.3.2-svcport-annotation.png - docs: reference/cluster-config#service-port-annotation - - type: feature - title: Outbound Connections - body: >- - Outbound connections are now routed through the intercepted - Pods which means that the connections originate from that - Pod from the cluster's perspective. This allows service - meshes to correctly identify the traffic. - docs: reference/routing/#outbound - - type: change - title: Inbound Connections - body: >- - Inbound connections from an intercepted agent are now - tunneled to the manager over the existing gRPC connection, - instead of establishing a new connection to the manager for - each inbound connection. This avoids interference from - certain service mesh configurations. - docs: reference/routing/#inbound - - # RBAC changes - - type: change - title: Traffic Manager needs new RBAC permissions - body: >- - The Traffic Manager requires RBAC - permissions to list Nodes, Pods, and to create a dummy - Service in the manager's namespace. - docs: reference/routing/#subnets - - type: change - title: Reduced developer RBAC requirements - body: >- - The on-laptop client no longer requires RBAC permissions to list the Nodes - in the cluster or to create Services, as that functionality - has been moved to the Traffic Manager. - - # Bugfixes - - type: bugfix - title: Able to detect subnets - body: >- - Telepresence will now detect the Pod CIDR ranges even if - they are not listed in the Nodes. - image: ./telepresence-2.3.2-subnets.png - docs: reference/routing/#subnets - - type: bugfix - title: Dynamic IP ranges - body: >- - The list of cluster subnets that the virtual network - interface will route is now configured dynamically and will - follow changes in the cluster. - - type: bugfix - title: No duplicate subnets - body: >- - Subnets fully covered by other subnets are now pruned - internally and thus never superfluously added to the - laptop's routing table. 
- docs: reference/routing/#subnets - - type: change # not a bugfix, but it only makes sense to mention after the above bugfixes - title: Change in default timeout - body: >- - The trafficManagerAPI timeout default has - changed from 5 seconds to 15 seconds, in order to facilitate - the extended time it takes for the traffic-manager to do its - initial discovery of cluster info as a result of the above - bugfixes. - - type: bugfix - title: Removal of DNS config files on macOS - body: >- - On macOS, files generated under - /etc/resolver/ as the result of using - include-suffixes in the cluster config are now - properly removed on quit. - docs: reference/routing/#macos-resolver - - - type: bugfix - title: Large file transfers - body: >- - Telepresence no longer erroneously terminates connections - early when sending a large HTTP response from an intercepted - service. - - type: bugfix - title: Race condition in shutdown - body: >- - When shutting down the user-daemon or root-daemon on the - laptop, telepresence quit and related commands - no longer return early before everything is fully shut down. - Now it can be counted on that by the time the command has - returned that all of the side-effects on the laptop have - been cleaned up. - - version: 2.3.1 - date: "2021-06-14" - notes: - - title: DNS Resolver Configuration - body: "Telepresence now supports per-cluster configuration for custom dns behavior, which will enable users to determine which local + remote resolver to use and which suffixes should be ignored + included. These can be configured on a per-cluster basis." - image: ./telepresence-2.3.1-dns.png - docs: reference/config - type: feature - - title: AlsoProxy Configuration - body: "Telepresence now supports also proxying user-specified subnets so that they can access external services only accessible to the cluster while connected to Telepresence. These can be configured on a per-cluster basis and each subnet is added to the TUN device so that requests are routed to the cluster for IPs that fall within that subnet." - image: ./telepresence-2.3.1-alsoProxy.png - docs: reference/config - type: feature - - title: Mutating Webhook for Injecting Traffic Agents - body: "The Traffic Manager now contains a mutating webhook to automatically add an agent to pods that have the telepresence.getambassador.io/traffic-agent: enabled annotation. This enables Telepresence to work well with GitOps CD platforms that rely on higher level kubernetes objects matching what is stored in git. For workloads without the annotation, Telepresence will add the agent the way it has in the past" - image: ./telepresence-2.3.1-inject.png - docs: reference/rbac - type: feature - - title: Traffic Manager Connect Timeout - body: "The trafficManagerConnect timeout default has changed from 20 seconds to 60 seconds, in order to facilitate the extended time it takes to apply everything needed for the mutator webhook." - image: ./telepresence-2.3.1-trafficmanagerconnect.png - docs: reference/config - type: change - - title: Fix for large file transfers - body: "Fix a tun-device bug where sometimes large transfers from services on the cluster would hang indefinitely" - image: ./telepresence-2.3.1-large-file-transfer.png - docs: reference/tun-device - type: bugfix - - title: Brew Formula Changed - body: "Now that the Telepresence rewrite is the main version of Telepresence, you can install it via Brew like so: brew install datawire/blackbird/telepresence." 
- image: ./telepresence-2.3.1-brew.png - docs: install/ - type: change - - version: 2.3.0 - date: "2021-06-01" - notes: - - title: Brew install Telepresence - body: "Telepresence can now be installed via brew on macOS, which makes it easier for users to stay up-to-date with the latest telepresence version. To install via brew, you can use the following command: brew install datawire/blackbird/telepresence2." - image: ./telepresence-2.3.0-homebrew.png - docs: install/ - type: feature - - title: TCP and UDP routing via Virtual Network Interface - body: "Telepresence will now perform routing of outbound TCP and UDP traffic via a Virtual Network Interface (VIF). The VIF is a layer 3 TUN-device that exists while Telepresence is connected. It makes the subnets in the cluster available to the workstation and will also route DNS requests to the cluster and forward them to intercepted pods. This means that pods with custom DNS configuration will work as expected. Prior versions of Telepresence would use firewall rules and were only capable of routing TCP." - image: ./tunnel.jpg - docs: reference/tun-device - type: feature - - title: SSH is no longer used - body: "All traffic between the client and the cluster is now tunneled via the traffic manager gRPC API. This means that Telepresence no longer uses ssh tunnels and that the manager no longer have an sshd installed. Volume mounts are still established using sshfs but it is now configured to communicate using the sftp-protocol directly, which means that the traffic agent also runs without sshd. A desired side effect of this is that the manager and agent containers no longer need a special user configuration." - image: ./no-ssh.png - docs: reference/tun-device/#no-ssh-required - type: change - - title: Running in a Docker container - body: "Telepresence can now be run inside a Docker container. This can be useful for avoiding side effects on a workstation's network, establishing multiple sessions with the traffic manager, or working with different clusters simultaneously." - image: ./run-tp-in-docker.png - docs: reference/inside-container - type: feature - - title: Configurable Log Levels - body: "Telepresence now supports configuring the log level for Root Daemon and User Daemon logs. This provides control over the nature and volume of information that Telepresence generates in daemon.log and connector.log." - image: ./telepresence-2.3.0-loglevels.png - docs: reference/config/#log-levels - type: feature - - version: 2.2.2 - date: "2021-05-17" - notes: - - title: Legacy Telepresence subcommands - body: Telepresence is now able to translate common legacy Telepresence commands into native Telepresence commands. So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the new Telepresence binary. 
- image: ./telepresence-2.2.png - docs: install/migrate-from-legacy/ - type: feature diff --git a/docs/latest/versions.yml b/docs/latest/versions.yml deleted file mode 100644 index 5a77b1d5..00000000 --- a/docs/latest/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.19.1" -dlVersion: "v2.19.1" -docsVersion: "2.15" -branch: release/v2 -productName: "Telepresence OSS" diff --git a/docs/pre-release/community.md b/docs/pre-release/community.md deleted file mode 100644 index 922457c9..00000000 --- a/docs/pre-release/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/pre-release/concepts/context-prop.md b/docs/pre-release/concepts/context-prop.md deleted file mode 100644 index b3eb41e3..00000000 --- a/docs/pre-release/concepts/context-prop.md +++ /dev/null @@ -1,37 +0,0 @@ -# Context propagation - -**Context propagation** is the transfer of request metadata across the services and remote processes of a distributed system. Telepresence uses context propagation to intelligently route requests to the appropriate destination. - -This metadata is the context that is transferred across system services. It commonly takes the form of HTTP headers; context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -Metadata propagation refers to any service or other middleware not stripping away the headers. Propagation facilitates the movement of the injected contexts between other downstream services and processes. - - -## What is distributed tracing? - -Distributed tracing is a technique for troubleshooting and profiling distributed microservices applications and is a common application for context propagation. It is becoming a key component for debugging. - -In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) (or another header format, such as [B3 headers](https://github.com/openzipkin/b3-propagation)), which facilitates maintaining the headers through every service without being stripped (the propagation). - - -## What are intercepts and preview URLs? 
- -[Intercepts](../../reference/intercepts) and [preview -URLs](../../howtos/preview-urls/) are functions of Telepresence that -enable easy local development from a remote Kubernetes cluster and -offer a preview environment for sharing and real-time collaboration. - -Telepresence uses custom HTTP headers and header propagation to -identify which traffic to intercept both for plain personal intercepts -and for personal intercepts with preview URLs; these techniques are -more commonly used for distributed tracing, so what they are being -used for is a little unorthodox, but the mechanisms for their use are -already widely deployed because of the prevalence of tracing. The -headers facilitate the smart routing of requests either to live -services in the cluster or services running locally on a developer’s -machine. The intercepted traffic can be further limited by using path -based routing. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to [Ambassador Cloud](https://app.getambassador.io) with other information about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. diff --git a/docs/pre-release/concepts/devloop.md b/docs/pre-release/concepts/devloop.md deleted file mode 100644 index 8b1fbf35..00000000 --- a/docs/pre-release/concepts/devloop.md +++ /dev/null @@ -1,50 +0,0 @@ -# The developer experience and the inner dev loop - -## How is the developer experience changing? - -The developer experience is the workflow a developer uses to develop, test, deploy, and release software. - -Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. - -The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, [controlled (canary) release](https://www.getambassador.io/docs/argo/latest/concepts/canary/), and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a [GitOps workflow](https://www.getambassador.io/docs/argo/latest/concepts/gitops/#what-is-gitops) and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. - -Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. - -Engineers now must design and build distributed service-based applications _and_ also assume responsibility for the full development life cycle. The new developer experience means that developers can no longer rely on monolithic application developer best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner development loop. 
Now developers have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this adds development time to the equation. - -## What is the inner dev loop? - -The inner dev loop is the single developer workflow. A single developer should be able to set up and use an inner dev loop to code and test changes quickly. - -Even within the Kubernetes space, developers will find much of the inner dev loop familiar. That is, code can still be written locally at a level that a developer controls and committed to version control. - -In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code — they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. - -![traditional inner dev loop](../../images/trad-inner-dev-loop.png) - -## In search of lost time: How does containerization change the inner dev loop? - -The inner dev loop is where writing and testing code happens, and time is critical for maximum developer productivity and getting features in front of end users. The faster the feedback loop, the faster developers can refactor and test again. - -Changes to the inner dev loop process, i.e., containerization, threaten to slow this development workflow down. Coding stays the same in the new inner dev loop, but code has to be containerized. The _containerized_ inner dev loop requires a number of new steps: - -* packaging code in containers -* writing a manifest to specify how Kubernetes should run the application (e.g., YAML-based configuration information, such as how much memory should be given to a container) -* pushing the container to the registry -* deploying containers in Kubernetes - -Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. - - -![container inner dev loop](../../images/container-inner-dev-loop.png) - -## Tackling the slow inner dev loop - -A slow inner dev loop can negatively impact frontend and backend teams, delaying work on individual and team levels and slowing releases into production overall. - -For example: - -* Frontend developers have to wait for previews of backend changes on a shared dev/staging environment (for example, until CI/CD deploys a new version) and/or rely on mocks/stubs/virtual services when coding their application locally. These changes are only verifiable by going through the CI/CD process to build and deploy within a target environment. -* Backend developers have to wait for CI/CD to build and deploy their app to a target environment to verify that their code works correctly with cluster or cloud-based dependencies as well as to share their work to get feedback. - -New technologies and tools can facilitate cloud-native, containerized development. 
And in the case of a sluggish inner dev loop, developers can accelerate productivity with tools that help speed the loop up again. diff --git a/docs/pre-release/concepts/devworkflow.md b/docs/pre-release/concepts/devworkflow.md deleted file mode 100644 index fa24fc2b..00000000 --- a/docs/pre-release/concepts/devworkflow.md +++ /dev/null @@ -1,7 +0,0 @@ -# The changing development workflow - -A changing workflow is one of the main challenges for developers adopting Kubernetes. Software development itself isn’t the challenge. Developers can continue to [code using the languages and tools with which they are most productive and comfortable](https://www.getambassador.io/resources/kubernetes-local-dev-toolkit/). That’s the beauty of containerized development. - -However, the cloud-native, Kubernetes-based approach to development means adopting a new development workflow and development environment. Beyond the basics, such as figuring out how to containerize software, [how to run containers in Kubernetes](https://www.getambassador.io/docs/kubernetes/latest/concepts/appdev/), and how to deploy changes into containers, for example, Kubernetes adds complexity before it delivers efficiency. The promise of a “quicker way to develop software” applies at least within the traditional aspects of the inner dev loop, where the single developer codes, builds and tests their software. But both within the inner dev loop and once code is pushed into version control to trigger the outer dev loop, the developer experience changes considerably from what many developers are used to. - -In this new paradigm, new steps are added to the inner dev loop, and more broadly, the developer begins to share responsibility for the full life cycle of their software. Inevitably this means taking new workflows and tools on board to ensure that the full life cycle continues full speed ahead. diff --git a/docs/pre-release/concepts/faster.md b/docs/pre-release/concepts/faster.md deleted file mode 100644 index b649e415..00000000 --- a/docs/pre-release/concepts/faster.md +++ /dev/null @@ -1,25 +0,0 @@ -# Making the remote local: Faster feedback, collaboration and debugging - -With the goal of achieving [fast, efficient development](https://www.getambassador.io/use-case/local-kubernetes-development/), developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. - -## How should I set up a Kubernetes development environment? - -[Setting up a development environment](https://www.getambassador.io/resources/development-environments-microservices/) for Kubernetes can be much more complex than the set up for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. - -While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. - -A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. 
With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../../quick-start/) to facilitate fast feedback, debugging and collaboration. - -## What is Telepresence? - -Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../../quick-start/). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. - -## How can I get fast, efficient local development? - -The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. - -A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code, local debugging in production, and [preview URL](../../howtos/preview-urls/) functionality to share dev environments with others for multi-user collaboration. - -Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. - -The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts and preview URLs. diff --git a/docs/pre-release/concepts/intercepts.md b/docs/pre-release/concepts/intercepts.md deleted file mode 100644 index 4b1e770f..00000000 --- a/docs/pre-release/concepts/intercepts.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "Types of intercepts" -description: "Short demonstration of personal vs global intercepts" ---- - -import React from 'react'; - -import Alert from '@material-ui/lab/Alert'; -import AppBar from '@material-ui/core/AppBar'; -import InterceptAnimationSVG from '@src/assets/images/intercept-animation.inline.svg' -import Paper from '@material-ui/core/Paper'; -import Tab from '@material-ui/core/Tab'; -import TabContext from '@material-ui/lab/TabContext'; -import TabList from '@material-ui/lab/TabList'; -import TabPanel from '@material-ui/lab/TabPanel'; - -export function Animation(props) { - let el = React.useRef(null); - React.useEffect(() => { - const queueAnimation = () => { - setTimeout(() => { - el.current?.getAnimations({subtree: true})?.forEach((anim) => { - anim.finish(); - anim.play(); - }) - queueAnimation(); - }, 3000); - }; - queueAnimation(); - }, el); - return ( -
- -
- ); -}; - -export function TabsContainer({ children, ...props }) { - const [state, setState] = React.useState({curTab: "personal"}); - React.useEffect(() => { - const query = new URLSearchParams(window.location.search); - var interceptType = query.get('intercept') || "personal"; - if (state.curTab != interceptType) { - setState({curTab: interceptType}); - } - }, [state, setState]) - var setURL = function(newTab) { - history.replaceState(null,null, - `?intercept=${newTab}${window.location.hash}`, - ); - }; - return ( -
- - - {setState({curTab: newTab}); setURL(newTab)}} aria-label="intercept types"> - - - - - - {children} - -
- ); -}; - -# Types of intercepts - - - - -# No intercept - - - - -This is the normal operation of your cluster without Telepresence. - - - - - -# Global intercept - - - - -**Global intercepts** replace the Kubernetes "Orders" service with the -Orders service running on your laptop. The users see no change, but -with all the traffic coming to your laptop, you can observe and debug -with all your dev tools. - - - -### Creating and using global intercepts - - 1. Creating the intercept: Intercept your service from your CLI: - - ```shell - telepresence intercept SERVICENAME --http-match=all - ``` - - - - Make sure your current kubectl context points to the target - cluster. If your service is running in a different namespace than - your current active context, use or change the `--namespace` flag. - - - - 2. Using the intercept: Send requests to your service: - - All requests will be sent to the version of your service that is - running in the local development environment. - - - - -# Personal intercept - -**Personal intercepts** allow you to be selective and intercept only -some of the traffic to a service while not interfering with the rest -of the traffic. This allows you to share a cluster with others on your -team without interfering with their work. - - - - -In the illustration above, **Orange** -requests are being made by Developer 2 on their laptop and the -**green** are made by a teammate, -Developer 1, on a different laptop. - -Each developer can intercept the Orders service for their requests only, -while sharing the rest of the development environment. - - - -### Creating and using personal intercepts - - 1. Creating the intercept: Intercept your service from your CLI: - - ```shell - telepresence intercept SERVICENAME --http-match=Personal-Intercept=126a72c7-be8b-4329-af64-768e207a184b - ``` - - We're using - `Personal-Intercept=126a72c7-be8b-4329-af64-768e207a184b` as the - header for the sake of the example, but you can use any - `key=value` pair you want, or `--http-match=auto` to have it - choose something automatically. - - - - Make sure your current kubect context points to the target - cluster. If your service is running in a different namespace than - your current active context, use or change the `--namespace` flag. - - - - 2. Using the intercept: Send requests to your service by passing the - HTTP header: - - ```http - Personal-Intercept: 126a72c7-be8b-4329-af64-768e207a184b - ``` - - - - Need a browser extension to modify or remove an HTTP-request-headers? - - Chrome - {' '} - Firefox - - - - 3. Using the intercept: Send requests to your service without the - HTTP header: - - Requests without the header will be sent to the version of your - service that is running in the cluster. This enables you to share - the cluster with a team! - -### Intercepting a specific endpoint - -It's not uncommon to have one service serving several endpoints. Telepresence is capable of limiting an -intercept to only affect the endpoints you want to work with by using one of the `--http-path-xxx` -flags below in addition to using `--http-match` flags. Only one such flag can be used in an intercept -and, contrary to the `--http-match` flag, it cannot be repeated. 
- -The following flags are available: - -| Flag | Meaning | -|-------------------------------|------------------------------------------------------------------| -| `--http-path-equal ` | Only intercept the endpoint for this exact path | -| `--http-path-prefix ` | Only intercept endpoints with a matching path prefix | -| `--http-path-regex ` | Only intercept endpoints that match the given regular expression | - - - diff --git a/docs/pre-release/doc-links.yml b/docs/pre-release/doc-links.yml deleted file mode 100644 index c21cf9f3..00000000 --- a/docs/pre-release/doc-links.yml +++ /dev/null @@ -1,82 +0,0 @@ - - title: Quick start - link: quick-start - - title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Install Traffic Manager with Helm - link: install/helm/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ - - title: Core concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: 'Making the remote local: Faster feedback, collaboration and debugging' - link: concepts/faster - - title: Context propagation - link: concepts/context-prop - - title: Types of intercepts - link: concepts/intercepts - - title: How do I... - items: - - title: Intercept a service in your own environment - link: howtos/intercepts - - title: Share dev environments with preview URLs - link: howtos/preview-urls - - title: Proxy outbound traffic to my cluster - link: howtos/outbound - - title: Send requests to an intercepted service - link: howtos/request - - title: Technical reference - items: - - title: Architecture - link: reference/architecture - - title: Client reference - link: reference/client - items: - - title: login - link: reference/client/login - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Using Docker for intercepts - link: reference/docker-run - - title: Running Telepresence in a Docker container - link: reference/inside-container - - title: Environment variables - link: reference/environment - - title: Intercepts - link: reference/intercepts/ - items: - - title: Manually injecting the Traffic Agent - link: reference/intercepts/manual-agent - - title: Volume mounts - link: reference/volume - - title: RESTful API service - link: reference/restapi - - title: DNS resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: Telepresence and VPNs - link: reference/vpn - - title: Networking through Virtual Network Interface - link: reference/tun-device - - title: Connection Routing - link: reference/routing - - title: Using Telepresence with Linkerd - link: reference/linkerd - - title: FAQs - link: faqs - - title: Troubleshooting - link: troubleshooting - - title: Community - link: community - - title: Release Notes - link: release-notes diff --git a/docs/pre-release/faqs.md b/docs/pre-release/faqs.md deleted file mode 100644 index 08eab7a5..00000000 --- a/docs/pre-release/faqs.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. 
The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access with additional developers or stakeholders to the application via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What operating systems does Telepresence work on?** - -Telepresence currently works natively on macOS (Intel and Apple silicon), Linux, and WSL 2. Starting with v2.4.0, we are also releasing a native Windows version of Telepresence that we are considering a Developer Preview. - -** What protocols can be intercepted by Telepresence?** - -All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](https://www.getambassador.io/feedback/) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. - -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. 
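For example (a minimal sketch; the service and namespace names are placeholders, not from this guide), once `telepresence connect` has succeeded you can reach such in-cluster services with any local client:

```console
$ telepresence connect
$ psql -h my-postgres.my-namespace -p 5432 -U app_user    # hypothetical in-cluster PostgreSQL service
$ curl http://my-api.my-namespace:8080/healthz            # plain HTTP services work the same way
```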
- -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. - -Telepresence will discover/prompt during first use for this info and make its best guess at figuring this out and ask you to confirm or update this. - -** Why are my intercepts still reporting as active when they've been disconnected?** - - In certain cases, Telepresence might not have been able to communicate back with Ambassador Cloud to update the intercept's status. Worry not, they will get garbage collected after a period of time. - -** Why is my intercept associated with an "Unreported" cluster?** - - Intercepts tagged with "Unreported" clusters simply mean Ambassador Cloud was unable to associate a service instance with a known detailed service from an Edge Stack or API Gateway cluster. [Connecting your cluster to the Service Catalog](/docs/telepresence/latest/quick-start/) will properly match your services from multiple data sources. - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the traffic-manager and traffic-agent images that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptable mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single `traffic-manager` service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted all pods associated with this workload will be restarted with the Traffic Agent automatically injected. - -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the `traffic-manager` service installed in the cluster and `traffic-agent` containers injected into each pod being intercepted. - -Running this command will also stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. 
- -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established by using -the `kubectl port-forward` machinery (though without actually spawning -a separate program) to establish a TCP connection to Telepresence -Traffic Manager in the cluster, and running Telepresence's custom VPN -protocol over that TCP connection. - -  - -** What identity providers are supported for authenticating to view a preview URL?** - -* GitHub -* GitLab -* Google - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](https://www.getambassador.io/feedback/) which providers are the most important to you and your team in order for us to prioritize those. - -** Is Telepresence open source?** - -Yes it is! You can find its source code on [GitHub](https://github.com/telepresenceio/telepresence). - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](https://www.getambassador.io/feedback/), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts. diff --git a/docs/pre-release/howtos/intercepts.md b/docs/pre-release/howtos/intercepts.md deleted file mode 100644 index 6adebd6c..00000000 --- a/docs/pre-release/howtos/intercepts.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Intercept a service in your own environment - -Telepresence enables you to create intercepts to a target Kubernetes workload. Once you have created an intercept, you can code and debug your associated service locally. - -For a detailed walk-through on creating intercepts using our sample app, follow the [quick start guide](../../quick-start/demo-node/). - - -## Prerequisites - -Before you begin, you need to have [Telepresence installed](../../install/), and either the Kubernetes command-line tool, [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), or the OpenShift Container Platform command-line interface, [`oc`](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). This document uses kubectl in all example commands. OpenShift users can substitute oc [commands instead](https://docs.openshift.com/container-platform/4.1/cli_reference/developer-cli-commands.html). - -This guide assumes you have a Kubernetes deployment and service accessible publicly by an ingress controller, and that you can run a copy of that service on your laptop. - - -## Intercept your service with a global intercept - -With Telepresence, you can create [global intercepts](../../concepts/intercepts/?intercept=global) that intercept all traffic going to a service in your cluster and route it to your local environment instead. - -1. Connect to your cluster with `telepresence connect` and connect to the Kubernetes API server: - - ```console - $ curl -ik https://kubernetes.default - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - The 401 response is expected when you first connect.
- - - You now have access to your remote Kubernetes API server as if you were on the same network. You can now use any local tools to connect to any service in the cluster. - - If you have difficulties connecting, make sure you are using Telepresence 2.0.3 or a later version. Check your version by entering `telepresence version` and [upgrade if needed](../../install/upgrade/). - -2. Enter `telepresence list` and make sure the service you want to intercept is listed. For example: - - ```console - $ telepresence list - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -3. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example: - - ```console - $ kubectl get service example-service --output yaml - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -4. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - * For `--port`: specify the port the local instance of your service is running on. If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon. - * For `--env-file`: specify a file path for Telepresence to write the environment variables that are set in the pod. - The example below shows Telepresence intercepting traffic going to service `example-service`. Requests that reach the service on port `http` in the cluster now get routed to `8080` on the workstation, and the environment variables of the service are written to `~/example-service-intercept.env`. - ```console - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -5. Start your local environment using the environment variables retrieved in the previous step. - - The following are some examples of how to pass the environment variables to your local process: - * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file). - * **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. - * **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Query the environment in which you intercepted a service and verify that your local instance is being invoked. - All the traffic previously routed to your Kubernetes Service is now routed to your local environment. - -You can now: -- Make changes on the fly and see them reflected when interacting with - your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - - - - **Didn't work?** Make sure the port you're listening on matches the one you specified when you created your intercept.
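As a rough sketch of steps 4 and 5 combined (the image name `example-service:dev` is hypothetical), running the local copy under Docker with the captured environment might look like this:

```console
$ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env
$ docker run --rm -p 8080:8080 --env-file ~/example-service-intercept.env example-service:dev
```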
- - diff --git a/docs/pre-release/howtos/outbound.md b/docs/pre-release/howtos/outbound.md deleted file mode 100644 index e148023e..00000000 --- a/docs/pre-release/howtos/outbound.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Proxy outbound traffic to my cluster - -While preview URLs are a powerful feature, Telepresence offers other options for proxying traffic between your laptop and the cluster. This section describes how to proxy outbound traffic and control outbound connectivity to your cluster. - - This guide assumes that you have the quick start sample web app running in your cluster to test accessing the web-app service. You can substitute this service for any other service you are running. - -## Proxying outbound traffic - -Connecting to the cluster instead of running an intercept allows you to access cluster workloads as if your laptop was another pod in the cluster. This enables you to access other Kubernetes services using `.`. A service running on your laptop can interact with other services on the cluster by name. - -When you connect to your cluster, the background daemon on your machine runs and installs the [Traffic Manager deployment](../../reference/architecture/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect` and enter your password to run the daemon. - - ``` - $ telepresence connect - Launching Telepresence Daemon v2.3.7 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground /home//.cache/telepresence/logs '' ''" - [sudo] password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -2. Run `telepresence status` to confirm connection to your cluster and that it is proxying traffic. - - ``` - $ telepresence status - Root Daemon: Running - Version : v2.3.7 (api 3) - Primary DNS : "" - Fallback DNS: "" - User Daemon: Running - Version : v2.3.7 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 0 total - ``` - -3. Access your service by name with `curl web-app.emojivoto:80`. Telepresence routes the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl web-app.emojivoto:80 - - - - - Emoji Vote - ... - ``` - -If you terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -When using Telepresence in this way, you need to access services with the namespace qualified DNS name (<service name>.<namespace>) before you start an intercept. After you start an intercept, only <service name> is required. Read more about these differences in the DNS resolution reference guide. - -## Controlling outbound connectivity - -By default, Telepresence provides access to all Services found in all namespaces in the connected cluster. This can lead to problems if the user does not have RBAC access permissions to all namespaces. You can use the `--mapped-namespaces ` flag to control which namespaces are accessible.
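For example, a connect session limited to two namespaces might look like the following sketch (the namespace names are placeholders):

```console
$ telepresence connect --mapped-namespaces dev,staging
```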
- -When you use the `--mapped-namespaces` flag, you need to include all namespaces containing services you want to access, as well as all namespaces that contain services related to the intercept. - -### Using local-only intercepts - -When you develop on isolated apps or on a virtualized container, you don't need an outbound connection. However, when developing services that aren't deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service will be deployed. This is because services that aren't exposed through ingress controllers require connectivity to those services. When you provide outbound connectivity, the service can access other services in that namespace without using qualified names. A local-only intercept does not cause outbound connections to originate from the intercepted namespace. The reason for this is to establish correct origin; the connection must be routed to a `traffic-agent` of an intercepted pod. For local-only intercepts, the outbound connections originate from the `traffic-manager`. - -To control outbound connectivity to specific namespaces, add the `--local-only` flag: - - ``` - $ telepresence intercept --namespace --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. -You can deactivate the intercept with `telepresence leave `. This removes unqualified name access. - -### Proxy outbound connectivity for laptops - -To specify additional hosts or subnets that should be resolved inside of the cluster, see [AlsoProxy](../../reference/config/#alsoproxy) for more details. \ No newline at end of file diff --git a/docs/pre-release/howtos/preview-urls.md b/docs/pre-release/howtos/preview-urls.md deleted file mode 100644 index 670f72dd..00000000 --- a/docs/pre-release/howtos/preview-urls.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Share development environments with preview URLs - -Telepresence can generate sharable preview URLs. This enables you to work on a copy of your service locally, and share that environment with a teammate for pair programming. While using preview URLs, Telepresence will route only the requests coming from that preview URL to your local environment. Requests to the ingress are routed to your cluster as usual. - -Preview URLs are protected behind authentication through Ambassador Cloud, and access to the URL is only available to users in your organization. You can make the URL publicly accessible for sharing with outside collaborators. - -## Creating a preview URL - -1. Connect to Telepresence and enter the `telepresence list` command in your CLI to verify the service is listed. -Telepresence only supports Deployments, ReplicaSets, and StatefulSet workloads with a label that matches a Service. - -2. Enter `telepresence login` to launch Ambassador Cloud in your browser. - - If you are in an environment where Telepresence cannot launch in a local browser, pass the [`--apikey` flag to `telepresence login`](../../reference/client/login/). - -3. Start the intercept with `telepresence intercept --port --env-file ` and adjust the flags as follows: - * **port:** specify the port the local instance of your service is running on.
If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon. - * **env-file:** specify a file path for Telepresence to write the environment variables that are set in the pod. - -4. Answer the question prompts. - * **What's your ingress' IP address?**: the IP address or DNS name of your ingress; this is usually a `service.namespace` DNS name. - * **What's your ingress' TCP port number?**: the port your ingress controller is listening to. This is often 443 for TLS ports, and 80 for non-TLS ports. - * **Does that TCP port on your ingress use TLS (as opposed to cleartext)?**: whether the ingress controller is expecting TLS communication on the specified port. - * **If required by your ingress, specify a different hostname (TLS-SNI, HTTP "Host" header) to be used in requests.**: if your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), enter that value here. - - The example below shows a preview URL for `example-service` which listens on port 8080. The preview URL for ingress will use the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and the hostname `dev-environment.edgestack.me`: - - ```console -$ telepresence intercept example-service --port 8080 --env-file ~/ex-svc.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' TCP port number? - - [default: -]: 80 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -5. Start your local environment using the environment variables retrieved in the previous step. - - Here are some examples of how to pass the environment variables to your local process: - * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file). - * **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. - * **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Go to the Preview URL generated from the intercept. -Traffic is now intercepted from your preview URL without impacting other traffic from your Ingress. - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -7.
Make a request on the URL you would usually query for that environment. Don't route a request to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) routes to services in the cluster like normal. - -8. Share with a teammate. - - You can collaborate with teammates by sending your preview URL to them. Once your teammate logs in, they must select the same identity provider and org as you are using. This authorizes their access to the preview URL. When they visit the preview URL, they see the intercepted service running on your laptop. - You can now collaborate with a teammate to debug the service on the shared intercept URL without impacting the production environment. - -## Sharing a preview URL with people outside your team - -To collaborate with someone outside of your identity provider's organization: -Log into [Ambassador Cloud](https://app.getambassador.io/cloud/). - navigate to your service's intercepts, select the preview URL details, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the preview URL either from the dashboard or by running `telepresence preview remove ` also removes all access to the preview URL. - -## Change access restrictions - -To collaborate with someone outside of your identity provider's organization, you must make your preview URL publicly accessible. - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/). -2. Select the service you want to share and open the service details page. -3. Click the **Intercepts** tab and expand the preview URL details. -4. Click **Make Publicly Accessible**. - -Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on a local environment. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. - -## Remove a preview URL from an Intercept - -To delete a preview URL and remove all access to the intercepted service, - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) -2. Click on the service you want to share and open the service details page. -3. Click the **Intercepts** tab and expand the preview URL details. -4. Click **Remove Preview**. - -Alternatively, you can remove a preview URL with the following command: -`telepresence preview remove ` diff --git a/docs/pre-release/howtos/request.md b/docs/pre-release/howtos/request.md deleted file mode 100644 index 1109c68d..00000000 --- a/docs/pre-release/howtos/request.md +++ /dev/null @@ -1,12 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Send requests to an intercepted service - -Ambassador Cloud can inform you about the required request parameters to reach an intercepted service. - - 1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) - 2. Navigate to the desired service Intercepts page - 3. Click the **Query** button to open the pop-up menu. - 4. Toggle between **CURL**, **Headers** and **Browse**. - -The pre-built queries and header information will help you get started to query the desired intercepted service and manage header propagation. 
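For instance, a request that targets an intercepted service through the ingress might look like the sketch below; the hostname and header value are illustrative placeholders, and the actual header name and value to use are the ones reported by the `telepresence intercept` output or shown in the Ambassador Cloud **Headers** tab:

```console
$ curl -H 'x-telepresence-intercept-id: <intercept-id>:example-service' https://dev-environment.edgestack.me/api/
```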
diff --git a/docs/pre-release/images/github-login.png b/docs/pre-release/images/github-login.png deleted file mode 100644 index cfd4d4bf..00000000 Binary files a/docs/pre-release/images/github-login.png and /dev/null differ diff --git a/docs/pre-release/install/helm.md b/docs/pre-release/install/helm.md deleted file mode 100644 index 688d2f20..00000000 --- a/docs/pre-release/install/helm.md +++ /dev/null @@ -1,181 +0,0 @@ -# Install with Helm - -[Helm](https://helm.sh) is a package manager for Kubernetes that automates the release and management of software on Kubernetes. The Telepresence Traffic Manager can be installed via a Helm chart with a few simple steps. - -**Note** that installing the Traffic Manager through Helm will prevent `telepresence connect` from ever upgrading it. If you wish to upgrade a Traffic Manager that was installed via the Helm chart, please see the steps [below](#upgrading-the-traffic-manager) - -For more details on what the Helm chart installs and what can be configured, see the Helm chart [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). - -## Before you begin - -The Telepresence Helm chart is hosted by Ambassador Labs and published at `https://app.getambassador.io`. - -Start by adding this repo to your Helm client with the following command: - -```shell -helm repo add datawire https://app.getambassador.io -helm repo update -``` - -## Install with Helm - -When you run the Helm chart, it installs all the components required for the Telepresence Traffic Manager. - -1. If you are installing the Telepresence Traffic Manager **for the first time on your cluster**, create the `ambassador` namespace in your cluster: - - ```shell - kubectl create namespace ambassador - ``` - -2. Install the Telepresence Traffic Manager with the following command: - - ```shell - helm install traffic-manager --namespace ambassador datawire/telepresence - ``` - -### Install into custom namespace - -The Helm chart supports being installed into any namespace, not necessarily `ambassador`. Simply pass a different `namespace` argument to `helm install`. -For example, if you wanted to deploy the traffic manager to the `staging` namespace: - -```bash -helm install traffic-manager --namespace staging datawire/telepresence -``` - -Note that users of Telepresence will need to configure their kubeconfig to find this installation of the Traffic Manager: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` - -See [the kubeconfig documentation](../../reference/config#manager) for more information. - -### Upgrading the Traffic Manager. - -Versions of the Traffic Manager Helm chart are coupled to the versions of the Telepresence CLI that they are intended for. -Thus, for example, if you wish to use Telepresence `v2.4.0`, you'll need to install version `v2.4.0` of the Traffic Manager Helm chart. - -Upgrading the Traffic Manager is the same as upgrading any other Helm chart; for example, if you installed the release into the `ambassador` namespace, and you just wished to upgrade it to the latest version without changing any configuration values: - -```shell -helm repo up -helm upgrade traffic-manager datawire/telepresence --reuse-values --namespace ambassador -``` - -If you want to upgrade the Traffic-Manager to a specific version, add a `--version` flag with the version number to the upgrade command. 
For example: `--version v2.4.1` - -## RBAC - -### Installing a namespace-scoped traffic manager - -You might not want the Traffic Manager to have permissions across the entire kubernetes cluster, or you might want to be able to install multiple traffic managers per cluster (for example, to separate them by environment). -In these cases, the traffic manager supports being installed with a namespace scope, allowing cluster administrators to limit the reach of a traffic manager's permissions. - -For example, suppose you want a Traffic Manager that only works on namespaces `dev` and `staging`. -To do this, create a `values.yaml` like the following: - -```yaml -managerRbac: - create: true - namespaced: true - namespaces: - - dev - - staging -``` - -This can then be installed via: - -```bash -helm install traffic-manager --namespace staging datawire/telepresence -f ./values.yaml -``` - -**NOTE** Do not install namespace-scoped Traffic Managers and a global Traffic Manager in the same cluster, as it could have unexpected effects. - -#### Namespace collision detection - -The Telepresence Helm chart will try to prevent namespace-scoped Traffic Managers from managing the same namespaces. -It will do this by creating a ConfigMap, called `traffic-manager-claim`, in each namespace that a given install manages. - -So, for example, suppose you install one Traffic Manager to manage namespaces `dev` and `staging`, as: - -```bash -helm install traffic-manager --namespace dev datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={dev,staging}' -``` - -You might then attempt to install another Traffic Manager to manage namespaces `staging` and `prod`: - -```bash -helm install traffic-manager --namespace prod datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={staging,prod}' -``` - -This would fail with an error: - -``` -Error: rendered manifests contain a resource that already exists. Unable to continue with install: ConfigMap "traffic-manager-claim" in namespace "staging" exists and cannot be imported into the current release: invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-namespace" must equal "prod": current value is "dev" -``` - -To fix this error, fix the overlap either by removing `staging` from the first install, or from the second. - -#### Namespace scoped user permissions - -Optionally, you can also configure user rbac to be scoped to the same namespaces as the manager itself. -You might want to do this if you don't give your users permissions throughout the cluster, and want to make sure they only have the minimum set required to perform telepresence commands on certain namespaces. - -Continuing with the `dev` and `staging` example from the previous section, simply add the following to `values.yaml` (make sure you set the `subjects`!): - -```yaml -clientRbac: - create: true - - # These are the users or groups to which the user rbac will be bound. - # This MUST be set. 
- subjects: {} - # - kind: User - # name: jane - # apiGroup: rbac.authorization.k8s.io - - namespaced: true - - namespaces: - - dev - - staging -``` - -#### Namespace-scoped webhook - -If you wish to use the traffic-manager's [mutating webhook](../../reference/cluster-config#mutating-webhook) with a namespace-scoped traffic manager, you will have to ensure that each namespace has an `app.kubernetes.io/name` label that is identical to its name: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: staging - labels: - app.kubernetes.io/name: staging -``` - -You can also use `kubectl label` to add the label to an existing namespace, e.g.: - -```shell -kubectl label namespace staging app.kubernetes.io/name=staging -``` - -This is required because the mutating webhook will use the name label to find namespaces to operate on. - -**NOTE** This labelling happens automatically in kubernetes >= 1.21. - -### Installing RBAC only - -Telepresence Traffic Manager does require some [RBAC](../../reference/rbac/) for the traffic-manager deployment itself, as well as for users. -To make it easier for operators to introspect / manage RBAC separately, you can use `rbac.only=true` to -only create the rbac-related objects. -Additionally, you can use `clientRbac.create=true` and `managerRbac.create=true` to toggle which subset(s) of RBAC objects you wish to create. diff --git a/docs/pre-release/install/index.md b/docs/pre-release/install/index.md deleted file mode 100644 index 355ad2c5..00000000 --- a/docs/pre-release/install/index.md +++ /dev/null @@ -1,152 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install - -Install Telepresence by running the commands below for your OS. If you are not the administrator of your cluster, you will need [administrative RBAC permissions](../reference/rbac#administrating-telepresence) to install and use Telepresence in your cluster. - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. 
It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## What's Next? - -Follow one of our [quick start guides](../quick-start/) to start using Telepresence, either with our sample app or in your own environment. - -## Installing nightly versions of Telepresence - -We build and publish the contents of the default branch, [release/v2](https://github.com/telepresenceio/telepresence), of Telepresence -nightly, Monday through Friday, for macOS (Intel and Apple silicon), Linux, and Windows. - -The tags are formatted like so: `vX.Y.Z-nightly-$gitShortHash`. - -`vX.Y.Z` is the most recent release of Telepresence with the patch version (Z) bumped one higher. -For example, if our last release was 2.3.4, nightly builds would start with v2.3.5, until a new -version of Telepresence is released. - -`$gitShortHash` will be the short hash of the git commit of the build. - -Use these URLs to download the most recent nightly build. - - - - -```shell -# Intel Macs -https://app.getambassador.io/download/tel2/darwin/amd64/nightly/telepresence - -# Apple silicon Macs -https://app.getambassador.io/download/tel2/darwin/arm64/nightly/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/nightly/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/windows/amd64/nightly/telepresence.zip -``` - - - - -## Installing older versions of Telepresence - -Use these URLs to download an older version for your OS (including older nightly builds), replacing `x.y.z` with the versions you want. - - - - -```shell -# Intel Macs -https://app.getambassador.io/download/tel2/darwin/amd64/x.y.z/telepresence - -# Apple silicon Macs -https://app.getambassador.io/download/tel2/darwin/arm64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/windows/amd64/x.y.z/telepresence -``` - - - diff --git a/docs/pre-release/install/migrate-from-legacy.md b/docs/pre-release/install/migrate-from-legacy.md deleted file mode 100644 index 0f227f2a..00000000 --- a/docs/pre-release/install/migrate-from-legacy.md +++ /dev/null @@ -1,109 +0,0 @@ -# Migrate from legacy Telepresence - -Telepresence (formerly referenced as Telepresence 2, which is the current major version) has different mechanics and requires a different mental model from [legacy Telepresence 1](https://www.telepresence.io/docs/v1/) when working with local instances of your services. - -In legacy Telepresence, a pod running a service was swapped with a pod running the Telepresence proxy. This proxy received traffic intended for the service, and sent the traffic onward to the target workstation or laptop. We called this mechanism "swap-deployment". - -In practice, this mechanism, while simple in concept, had some challenges. Losing the connection to the cluster would leave the deployment in an inconsistent state. Swapping the pods would take time. - -Telepresence 2 introduces a [new -architecture](../../reference/architecture/) built around "intercepts" -that addresses these problems. With the new Telepresence, a sidecar -proxy ("traffic agent") is injected onto the pod. 
The proxy then
-intercepts traffic intended for the Pod and routes it to the
-workstation/laptop. The advantage of this approach is that the
-service is running at all times, and no swapping is used. By using
-the proxy approach, we can also do personal intercepts, where rather
-than re-routing all traffic to the laptop/workstation, it only
-re-routes the traffic designated as belonging to that user, so that
-multiple developers can intercept the same service at the same time
-without disrupting normal operation or disrupting each other.
-
-Please see [the Telepresence quick start](../../quick-start/) for an introduction to running intercepts and [the intercept reference doc](../../reference/intercepts/) for a deep dive into intercepts.
-
-## Using legacy Telepresence commands
-
-First please ensure you've [installed Telepresence](../).
-
-Telepresence is able to translate common legacy Telepresence commands into native Telepresence commands.
-So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used
-to with the Telepresence binary.
-
-For example, say you have a deployment (`myserver`) that you want to swap deployment (equivalent to intercept in
-Telepresence) with a python server; you could run the following command:
-
-```
-$ telepresence --swap-deployment myserver --expose 9090 --run python3 -m http.server 9090
-< help text >
-
-Legacy telepresence command used
-Command roughly translates to the following in Telepresence:
-telepresence intercept myserver --port 9090 -- python3 -m http.server 9090
-running...
-Connecting to traffic manager...
-Connected to context
-Using Deployment myserver
-intercepted
-    Intercept name : myserver
-    State          : ACTIVE
-    Workload kind  : Deployment
-    Destination    : 127.0.0.1:9090
-    Intercepting   : all TCP connections
-Serving HTTP on :: port 9090 (http://[::]:9090/) ...
-```
-
-Telepresence will let you know what the legacy Telepresence command has mapped to and automatically
-runs it. So you can get started with Telepresence today, using the commands you are used to,
-and it will help you learn the Telepresence syntax.
-
-### Legacy command mapping
-
-Below is the mapping of legacy Telepresence to Telepresence commands (where they exist and
-are supported).
-
-| Legacy Telepresence Command                    | Telepresence Command                      |
-|------------------------------------------------|-------------------------------------------|
-| --swap-deployment $workload                    | intercept $workload                       |
-| --expose localPort[:remotePort]                | intercept --port localPort[:remotePort]   |
-| --swap-deployment $workload --run-shell        | intercept $workload -- bash               |
-| --swap-deployment $workload --run $cmd         | intercept $workload -- $cmd               |
-| --swap-deployment $workload --docker-run $cmd  | intercept $workload --docker-run -- $cmd  |
-| --run-shell                                    | connect -- bash                           |
-| --run $cmd                                     | connect -- $cmd                           |
-| --env-file,--env-json                          | --env-file, --env-json (haven't changed)  |
-| --context,--namespace                          | --context, --namespace (haven't changed)  |
-| --mount,--docker-mount                         | --mount, --docker-mount (haven't changed) |
-
-### Legacy Telepresence command limitations
-
-Some of the commands and flags from legacy Telepresence either didn't apply to Telepresence or
-aren't yet supported in Telepresence. For some known popular commands, such as --method,
-Telepresence will include output letting you know that the flag has gone away. For flags that
-Telepresence can't translate yet, it will let you know that that flag is "unsupported".
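For instance, based on the mapping table above, a legacy swap-deployment with `--run-shell` roughly translates as follows (the `myserver` workload and port `8080` are only illustrative):

```console
# Legacy syntax, accepted and translated by the Telepresence binary
$ telepresence --swap-deployment myserver --expose 8080 --run-shell

# Approximate native Telepresence equivalent
$ telepresence intercept myserver --port 8080 -- bash
```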
- -If Telepresence is missing any flags or functionality that is integral to your usage, please let us know -by [creating an issue](https://github.com/telepresenceio/telepresence/issues) and/or talking to us on our [Slack channel](https://a8r.io/Slack)! - -## Telepresence changes - -Telepresence installs a Traffic Manager in the cluster and Traffic Agents alongside workloads when performing intercepts (including -with `--swap-deployment`) and leaves them. If you use `--swap-deployment`, the intercept will be left once the process -dies, but the agent will remain. There's no harm in leaving the agent running alongside your service, but when you -want to remove them from the cluster, the following Telepresence command will help: -``` -$ telepresence uninstall --help -Uninstall telepresence agents and manager - -Usage: - telepresence uninstall [flags] { --agent |--all-agents | --everything } - -Flags: - -d, --agent uninstall intercept agent on specific deployments - -a, --all-agents uninstall intercept agent on all deployments - -e, --everything uninstall agents and the traffic manager - -h, --help help for uninstall - -n, --namespace string If present, the namespace scope for this CLI request -``` - -Since the new architecture deploys a Traffic Manager into the Ambassador namespace, please take a look at -our [rbac guide](../../reference/rbac) if you run into any issues with permissions while upgrading to Telepresence. diff --git a/docs/pre-release/install/telepresence-pro.md b/docs/pre-release/install/telepresence-pro.md deleted file mode 100644 index f7a86bb1..00000000 --- a/docs/pre-release/install/telepresence-pro.md +++ /dev/null @@ -1,66 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install Telepresence Pro - -Telepresence Pro is a replacement to Telepresence's User Daemon -that gives you premium features including: -* Creating intercepts on your local machine from Ambassador Cloud. - -The `telepresence-pro` binary must be installed in the same directory as -`telepresence`. When you run `telepresence login` it will automatically be -installed and placed in the correct location. If you are in an air-gapped -environment or need to install it manually, ensure it is placed in the -correct directory. - - - - -```shell -# In this example, we install the binary in `/usr/local/bin/` since that's where `telepresence` -# is installed by default -# Intel Macs -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel-pro/darwin/amd64/$dlVersion$/latest/telepresence-pro -o /usr/local/bin/telepresence-pro -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence-pro - -# Apple silicon Macs -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel-pro/darwin/arm64/$dlVersion$/latest/telepresence-pro -o /usr/local/bin/telepresence-pro -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence-pro -``` - - - - -```shell -# In this example, we install the binary in `/usr/local/bin/` since that's where `telepresence` -# is installed by default -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel-pro/linux/amd64/$dlVersion$/latest/telepresence-pro -o /usr/local/bin/telepresence-pro -# 2. 
Make the binary executable:
-sudo chmod a+x /usr/local/bin/telepresence-pro
-```
-
-
-
-
-```powershell
-# In this example, we install the binary in `/usr/local/bin/` since that's where `telepresence`
-# is installed by default
-# Make sure you run the following from Powershell as Administrator
-# 1. Download the latest windows zip containing telepresence-pro.exe and its dependencies (~50 MB):
-curl -fL https://app.getambassador.io/download/tel-pro/windows/amd64/$dlVersion$/latest/telepresence-pro.exe -o telepresence-pro.exe
-
-# 2. Move the exe to your path (We recommend the default directory used by telepresence `C:\telepresence`)
-Copy-Item "telepresence-pro.exe" -Destination "C:\telepresence\telepresence-pro.exe" -Force
-```
-
-
-
-
-# Upgrade Telepresence Pro
-There are two options to upgrade Telepresence Pro. You can run `telepresence-pro upgrade` to get the latest
-version that is compatible with the current Telepresence version you are using, or you can remove the binary
-and Telepresence will automatically install it next time you `telepresence login`.
diff --git a/docs/pre-release/install/upgrade.md
deleted file mode 100644
index 10d0ca13..00000000
--- a/docs/pre-release/install/upgrade.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-description: "How to upgrade your installation of Telepresence and install previous versions."
----
-
-import Platform from '@src/components/Platform';
-
-# Upgrade Process
-The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version.
-
-
-
-
-```shell
-# Intel Macs
-
-# Upgrade via brew:
-brew upgrade datawire/blackbird/telepresence
-
-# OR upgrade manually:
-# 1. Download the latest binary (~60 MB):
-sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence
-
-# 2. Make the binary executable:
-sudo chmod a+x /usr/local/bin/telepresence
-
-# Apple silicon Macs
-
-# Install via brew:
-brew install datawire/blackbird/telepresence-arm64
-
-# OR Install manually:
-# 1. Download the latest binary (~60 MB):
-sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence
-
-# 2. Make the binary executable:
-sudo chmod a+x /usr/local/bin/telepresence
-```
-
-
-
-
-```shell
-# 1. Download the latest binary (~50 MB):
-sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence
-
-# 2. Make the binary executable:
-sudo chmod a+x /usr/local/bin/telepresence
-```
-
-
-
-
-```powershell
-# Windows is in Developer Preview, here is how you can install it:
-# Make sure you run the following from Powershell as Administrator
-# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB):
-curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip
-
-# 2. Unzip the zip file to a suitable directory + cleanup zip
-Expand-Archive -Path telepresence.zip
-Remove-Item 'telepresence.zip'
-cd telepresence
-
-# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to
-# C:\telepresence by default, but you can specify a custom path by passing in -Path C:\my\custom\path
-Set-ExecutionPolicy Bypass -Scope Process
-.\install-telepresence.ps1
-
-# 4.
Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -After upgrading your CLI you must stop any live Telepresence processes by issuing `telepresence quit`, then upgrade the Traffic Manager by running `telepresence connect` - -**Note** that if the Traffic Manager has been installed via Helm, `telepresence connect` will never upgrade it. If you wish to upgrade a Traffic Manager that was installed via the Helm chart, please see the [the Helm documentation](../helm#upgrading-the-traffic-manager) diff --git a/docs/pre-release/quick-start/TelepresenceQuickStartLanding.js b/docs/pre-release/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 537a6325..00000000 --- a/docs/pre-release/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,126 +0,0 @@ -import React from 'react'; - -import Icon from '../../../src/components/Icon'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */ -const Box = ({ children, color = 'blue', withConnector = false }) => ( - <> - {withConnector && ( -
-      [TelepresenceQuickStartLanding component body; only the rendered landing-page copy is recoverable]
-      Telepresence: Explore the use cases of Telepresence with a free remote Kubernetes cluster, or dive right in using your own.
-      Use Our Free Demo Cluster: See how Telepresence works without having to mess with your production environments.
-        Integration Testing (6 minutes): See how changes to a single service impact your entire application without having to run your entire app locally. GET STARTED
-        Fast code changes (5 minutes): Make changes to your service locally and see the results instantly, without waiting for containers to build. GET STARTED
-      Use Your Cluster: Understand how Telepresence fits in to your Kubernetes development workflow.
-        Intercept your service in your cluster (10 minutes): Query services only exposed in your cluster's network. Make changes and see them instantly in your K8s environment. GET STARTED
-      Watch the Demo: See Telepresence in action in our 3-minute demo video that you can share with your teammates.
-        • Instant feedback loops
-        • Infinite-scale development environments
-        • Access to your favorite local tools
-        • Easy collaborative development with teammates
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/pre-release/quick-start/demo-node.md b/docs/pre-release/quick-start/demo-node.md deleted file mode 100644 index 5dcbd654..00000000 --- a/docs/pre-release/quick-start/demo-node.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.5/quick-start/demo-node/) diff --git a/docs/pre-release/quick-start/demo-react.md b/docs/pre-release/quick-start/demo-react.md deleted file mode 100644 index 7c7c00cc..00000000 --- a/docs/pre-release/quick-start/demo-react.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.5/quick-start/demo-react/) diff --git a/docs/pre-release/quick-start/go.md b/docs/pre-release/quick-start/go.md deleted file mode 100644 index bd3e1e55..00000000 --- a/docs/pre-release/quick-start/go.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.5/quick-start/go/) diff --git a/docs/pre-release/quick-start/index.md b/docs/pre-release/quick-start/index.md deleted file mode 100644 index f2305d72..00000000 --- a/docs/pre-release/quick-start/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- - description: Telepresence Quick Start. ---- - -import TelepresenceQuickStartLanding from './TelepresenceQuickStartLanding'; - - diff --git a/docs/pre-release/quick-start/qs-cards.js b/docs/pre-release/quick-start/qs-cards.js deleted file mode 100644 index 0d8c7226..00000000 --- a/docs/pre-release/quick-start/qs-cards.js +++ /dev/null @@ -1,69 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - - Create a Local K8s Dev Environment - - - - Read the advanced guide on how to create your own complete Kubernetes development environment. - - - - - - - - Collaborating - - - - Use preview URLS to collaborate with your colleagues and others - outside of your organization. - - - - - - - - Outbound Sessions - - - - While connected to the cluster, your laptop can interact with - services as if it was another pod in the cluster. - - - - -
- ); -} diff --git a/docs/pre-release/quick-start/qs-go.md b/docs/pre-release/quick-start/qs-go.md deleted file mode 100644 index c2514635..00000000 --- a/docs/pre-release/quick-start/qs-go.md +++ /dev/null @@ -1,399 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## Create a complete development environment using this demo application - -Apply what you've learned from this guide and employ the Emojivoto application in your own local development environment. See the [Creating a local Kubernetes development environment](../../install/qs-go-advanced/) page to learn more. - -## What's Next? - - diff --git a/docs/pre-release/quick-start/qs-java.md b/docs/pre-release/quick-start/qs-java.md deleted file mode 100644 index 26b60de1..00000000 --- a/docs/pre-release/quick-start/qs-java.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." 
---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/pre-release/quick-start/qs-node.md b/docs/pre-release/quick-start/qs-node.md deleted file mode 100644 index 3280f10a..00000000 --- a/docs/pre-release/quick-start/qs-node.md +++ /dev/null @@ -1,383 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-<div class="docs-article-toc">
-<h3>Contents</h3>
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
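If you prefer to verify the edit from the terminal before reloading the browser, the color endpoint of the locally running Node server can be queried again; assuming the change above was saved and the auto-reload completed, it should now report the new color:

```shell
# The auto-reloaded local server should now return the edited color.
curl localhost:3000/color
# Expected output (after the change above): "orange"
```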
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/pre-release/quick-start/qs-python-fastapi.md b/docs/pre-release/quick-start/qs-python-fastapi.md deleted file mode 100644 index 3360261e..00000000 --- a/docs/pre-release/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,380 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-<div class="docs-article-toc">
-<h3>Contents</h3>
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
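As an aside, the intercept and the local server don't have to be started in separate steps. As described in the client reference, a command can be passed to `telepresence intercept` after `--`, so a single invocation along these lines (a sketch, assuming the same working directory as step 3) runs the FastAPI app for the lifetime of the intercept:

```shell
# Start the intercept and run the local FastAPI app; the intercept is
# cleaned up when the command exits.
telepresence intercept dataprocessingservice --port 3000 -- python3 app.py
```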
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/pre-release/quick-start/qs-python.md b/docs/pre-release/quick-start/qs-python.md deleted file mode 100644 index 481487c7..00000000 --- a/docs/pre-release/quick-start/qs-python.md +++ /dev/null @@ -1,391 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-<div class="docs-article-toc">
-<h3>Contents</h3>
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server. -Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install flask requests && python app.py - - Collecting flask - ... - Welcome to the DataServiceProcessingPythonService! - ... - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Python server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
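Because `telepresence connect` is still active, the intercepted front end can also be checked from the terminal by its in-cluster name. A rough sketch (this assumes the color name appears literally in the returned HTML, which may not hold for every version of the sample app):

```shell
# Fetch the front end by its cluster DNS name and look for the new color.
curl -s http://verylargejavaservice.default:8080 | grep -io orange | head -n 1
```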
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/pre-release/quick-start/telepresence-quickstart-landing.less b/docs/pre-release/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index 37304255..00000000 --- a/docs/pre-release/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,161 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1 { - color: @blue-dark; - font-weight: normal; - letter-spacing: 0.25px; - font-size: 33px; - } - p { - font-size: 0.875rem; - line-height: 24px; - margin: 0; - padding: 0; - } - - .demo-cluster-container { - display: grid; - margin: 40px 0; - grid-template-columns: repeat(2, 1fr); - column-gap: 40px; - @media screen and (max-width: 720px) { - grid-template-columns: repeat(1, 1fr); - row-gap: 50px; - } - } - .main-title-container { - display: flex; - flex-direction: column; - align-items: center; - p { - text-align: center; - font-size: 0.875rem; - } - } - h2.title { - font-size: 1.5rem; - color: @black; - font-weight: normal; - margin: 0 0 10px 0; - padding: 0; - &.underlined { - padding-bottom: 2px; - border-bottom: 3px solid @grey-separator; - text-align: center; - } - strong { - font-weight: 600; - } - } - .reading-time { - color: #7c7c87; - margin: 0; - } - .get-started { - font-size: 0.875rem; - font-weight: 600; - letter-spacing: 1.25px; - display: flex; - align-items: center; - margin: 20px 20px 10px; - text-decoration: none; - &.green { - color: @green; - } - &.green:hover { - color: @green-dark; - } - &.blue { - color: @blue; - } - &.blue:hover { - color: @blue-dark; - } - } - - .box-container { - border: 1.5px solid @grey-separator; - border-radius: 5px; - padding: 10px; - position: relative; - &::before { - content: ''; - position: absolute; - width: 14px; - height: 14px; - border-radius: 50%; - top: 0; - left: 50%; - transform: translate(-50%, -50%); - } - &.green::before { - background: @green; - box-shadow: 0 0 0 5px #00c05b45; - } - &.blue::before { - background: @blue; - box-shadow: 0 0 0 5px #0066ff45; - } - p { - font-size: 0.875rem; - line-height: 24px; - padding: 0; - } - } - .connector-container { - display: flex; - justify-content: center; - span { - background: @grey-separator; - width: 1.5px; - height: 37px; - } - } - - .telepresence-video { - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - h2.telepresence-video-title { - padding: 0; - margin: 0; - } - - strong { - color: @blue; - } - } - - .video-section { - display: grid; - grid-template-columns: 1fr 2fr; - column-gap: 10px; - @media screen and (max-width: 1400px) { - grid-template-columns: 1fr; - } - ul { - font-size: 14px; - margin: 0 10px 6px 0; - } - .video-container { - position: relative; - padding-bottom: 56.25%; // 16:9 aspect ratio - height: 0; - iframe { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - } - } - } -} diff --git a/docs/pre-release/redirects.yml b/docs/pre-release/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/pre-release/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/pre-release/reference/architecture.md b/docs/pre-release/reference/architecture.md deleted file mode 100644 index a7427486..00000000 --- a/docs/pre-release/reference/architecture.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: "How 
Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -When you run telepresence login, Telepresence installs an enhanced Telepresence Daemon. This replaces the open source -User Daemon and allows you to create intercepts on your local machine from Ambassador Cloud. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `kubectl describe -pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. 
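As a practical illustration of the Traffic Agent description above, after starting an intercept you could inspect one of the workload's pods to confirm the sidecar was injected; the pod name below is a placeholder:

```shell
# Inspect an intercepted pod; the Containers section of the output should
# include the Traffic Agent sidecar alongside the application container.
kubectl describe pod <pod-name>
```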
diff --git a/docs/pre-release/reference/client.md b/docs/pre-release/reference/client.md deleted file mode 100644 index 491dbbb8..00000000 --- a/docs/pre-release/reference/client.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Client reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. -You can append `--help` to each command below to get even more information about its usage. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | -| [`login`](login) | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out out of Ambassador Cloud | -| `license` | Formats a license from Ambassdor Cloud into a secret that can be [applied to your cluster](../cluster-config#add-license-to-cluster) if you require features of the extension in an air-gapped environment| -| `status` | Shows the current connectivity status | -| `quit` | Tell Telepresence daemons to quit | -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000`. A special flag `--docker-run` can be used to run the local instance [in a docker container](../docker-run). | -| `leave` | Stops an active intercept: `telepresence leave hello` | -| `preview` | Create or remove [preview URLs](../../howtos/preview-urls) for existing intercepts: `telepresence preview create ` | -| `loglevel` | Temporarily change the log-level of the traffic-manager, traffic-agents, and user and root daemons | -| `gather-logs` | Gather logs from traffic-manager, traffic-agents, user, and root daemons, and export them into a zip file that can be shared with others or included with a github issue. Use `--get-pod-yaml` to include the yaml for the `traffic-manager` and `traffic-agent`s. Use `--anonymize` to replace the actual pod names + namespaces used for the `traffic-manager` and pods containing `traffic-agent`s in the logs. | -| `version` | Show version of Telepresence CLI + Traffic-Manager (if connected) | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. 
-| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `current-cluster-id` | Get cluster ID for your kubernetes cluster, used for [configuring license](../cluster-config#add-license-to-cluster) in an air-gapped environment | diff --git a/docs/pre-release/reference/client/login.md b/docs/pre-release/reference/client/login.md deleted file mode 100644 index 78335197..00000000 --- a/docs/pre-release/reference/client/login.md +++ /dev/null @@ -1,61 +0,0 @@ -# Telepresence Login - -```console -$ telepresence login --help -Authenticate to Ambassador Cloud - -Usage: - telepresence login [flags] - -Flags: - --apikey string Static API key to use instead of performing an interactive login -``` - -## Description - -Use `telepresence login` to explicitly authenticate with [Ambassador -Cloud](https://www.getambassador.io/docs/cloud). Unless the -[`skipLogin` option](../../config) is set, other commands will -automatically invoke the `telepresence login` interactive login -procedure as nescessary, so it is rarely nescessary to explicitly run -`telepresence login`; it should only be truly nescessary to explictly -run `telepresence login` when you require a non-interactive login. - -The normal interactive login procedure involves launching a web -browser, a user interacting with that web browser, and finally having -the web browser make callbacks to the local Telepresence process. If -it is not possible to do this (perhaps you are using a headless remote -box via SSH, or are using Telepresence in CI), then you may instead -have Ambassador Cloud issue an API key that you pass to `telepresence -login` with the `--apikey` flag. - -## Telepresence - -When you run `telepresence login`, the CLI installs -a Telepresence binary. The Telepresence enhanced free client of the [User -Daemon](../../architecture) communicates with the Ambassador Cloud to -provide fremium features including the ability to create intercepts from -Ambassador Cloud. - -## Acquiring an API key - -1. Log in to Ambassador Cloud at https://app.getambassador.io/ . - -2. Click on your profile icon in the upper-left: ![Screenshot with the - mouse pointer over the upper-left profile icon](./apikey-2.png) - -3. Click on the "API Keys" menu button: ![Screenshot with the mouse - pointer over the "API Keys" menu button](./apikey-3.png) - -4. Click on the "generate new key" button in the upper-right: - ![Screenshot with the mouse pointer over the "generate new key" - button](./apikey-4.png) - -5. Enter a description for the key (perhaps the name of your laptop, - or perhaps the "CI"), and click "generate api key" to create it. - -You may now pass the API key as `KEY` to `telepresence login --apikey=KEY`. - -Telepresence will use that "master" API key to create narrower keys -for different components of Telepresence. You will see these appear -in the Ambassador Cloud web interface. 
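For example, on a headless machine or in CI, the whole flow can be made non-interactive by exporting the key and passing it on the command line (the environment variable name here is only a convention):

```shell
# TELEPRESENCE_API_KEY is assumed to hold an API key generated as described above;
# --apikey skips the browser-based login flow entirely.
telepresence login --apikey="$TELEPRESENCE_API_KEY"
```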
diff --git a/docs/pre-release/reference/client/login/apikey-2.png b/docs/pre-release/reference/client/login/apikey-2.png deleted file mode 100644 index 1379502a..00000000 Binary files a/docs/pre-release/reference/client/login/apikey-2.png and /dev/null differ diff --git a/docs/pre-release/reference/client/login/apikey-3.png b/docs/pre-release/reference/client/login/apikey-3.png deleted file mode 100644 index 4559b784..00000000 Binary files a/docs/pre-release/reference/client/login/apikey-3.png and /dev/null differ diff --git a/docs/pre-release/reference/client/login/apikey-4.png b/docs/pre-release/reference/client/login/apikey-4.png deleted file mode 100644 index 25c6581a..00000000 Binary files a/docs/pre-release/reference/client/login/apikey-4.png and /dev/null differ diff --git a/docs/pre-release/reference/cluster-config.md b/docs/pre-release/reference/cluster-config.md deleted file mode 100644 index 1db27ef7..00000000 --- a/docs/pre-release/reference/cluster-config.md +++ /dev/null @@ -1,312 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; -import { ClusterConfig } from '@src/components/Docs/Telepresence'; - -# Cluster-side configuration - -For the most part, Telepresence doesn't require any special -configuration in the cluster and can be used right away in any -cluster (as long as the user has adequate [RBAC permissions](../rbac) -and the cluster's server version is `1.17.0` or higher). - -However, some advanced features do require some configuration in the -cluster. - -## TLS - -In this example, other applications in the cluster expect to speak TLS to your -intercepted application (perhaps you're using a service-mesh that does -mTLS). - -In order to use `--mechanism=http` (or any features that imply -`--mechanism=http`) you need to tell Telepresence about the TLS -certificates in use. - -Tell Telepresence about the certificates in use by adjusting your -[workload's](../intercepts/#supported-workloads) Pod template to set a couple of -annotations on the intercepted Pods: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ "getambassador.io/inject-terminating-tls-secret": "your-terminating-secret" # optional -+ "getambassador.io/inject-originating-tls-secret": "your-originating-secret" # optional - spec: -+ serviceAccountName: "your-account-that-has-rbac-to-read-those-secrets" - containers: -``` - -- The `getambassador.io/inject-terminating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS server - certificate to use for decrypting and responding to incoming - requests. - - When Telepresence modifies the Service and workload port - definitions to point at the Telepresence Agent sidecar's port - instead of your application's actual port, the sidecar will use this - certificate to terminate TLS. - -- The `getambassador.io/inject-originating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS - client certificate to use for communicating with your application. - - You will need to set this if your application expects incoming - requests to speak TLS (for example, your - code expects to handle mTLS itself instead of letting a service-mesh - sidecar handle mTLS for it, or the port definition that Telepresence - modified pointed at the service-mesh sidecar instead of at your - application). - - If you do set this, you should to set it to the - same client certificate Secret that you configure the Ambassador - Edge Stack to use for mTLS. 
- -It is only possible to refer to a Secret that is in the same Namespace -as the Pod. - -The Pod will need to have permission to `get` and `watch` each of -those Secrets. - -Telepresence understands `type: kubernetes.io/tls` Secrets and -`type: istio.io/key-and-cert` Secrets; as well as `type: Opaque` -Secrets that it detects to be formatted as one of those types. - -## Air gapped cluster - -If your cluster is on an isolated network such that it cannot -communicate with Ambassador Cloud, then some additional configuration -is required to acquire a license key in order to use personal -intercepts. - -### Create a license - -1. - -2. Generate a new license (if one doesn't already exist) by clicking *Generate New License*. - -3. You will be prompted for your Cluster ID. Ensure your -kubeconfig context is using the cluster you want to create a license for then -run this command to generate the Cluster ID: - - ``` - $ telepresence current-cluster-id - - Cluster ID: - ``` - -4. Click *Generate API Key* to finish generating the license. - -5. On the licenses page, download the license file associated with your cluster. - -### Add license to cluster -There are two separate ways you can add the license to your cluster: manually creating and deploying -the license secret or having the helm chart manage the secret - -You only need to do one of the two options. - -#### Manual deploy of license secret - -1. Use this command to generate a Kubernetes Secret config using the license file: - - ``` - $ telepresence license -f - - apiVersion: v1 - data: - hostDomain: - license: - kind: Secret - metadata: - creationTimestamp: null - name: systema-license - namespace: ambassador - ``` - -2. Save the output as a YAML file and apply it to your -cluster with `kubectl`. - -3. When deploying the `traffic-manager` chart, you must add the additional values when running `helm install` by putting -the following into a file (for the example we'll assume it's called license-values.yaml) - - ``` - licenseKey: - # This mounts the secret into the traffic-manager - create: true - secret: - # This tells the helm chart not to create the secret since you've created it yourself - create: false - ``` - -4. Install the helm chart into the cluster - - ``` - helm install traffic-manager -n ambassador datawire/telepresence --create-namespace -f license-values.yaml - ``` - -5. Ensure that you have the docker image for the Smart Agent (datawire/ambassador-telepresence-agent:1.11.0) -pulled and in a registry your cluster can pull from. - -6. Have users use the `images` [config key](../config/#images) keys so telepresence uses the aforementioned image for their agent. - -#### Helm chart manages the secret - -1. Get the jwt token from the downloaded license file - - ``` - $ cat ~/Downloads/ambassador.License_for_yourcluster - eyJhbGnotarealtoken.butanexample - ``` - -2. Create the following values file, substituting your real jwt token in for the one used in the example below. -(for this example we'll assume the following is placed in a file called license-values.yaml) - - ``` - licenseKey: - # This mounts the secret into the traffic-manager - create: true - # This is the value from the license file you download. this value is an example and will not work - value: eyJhbGnotarealtoken.butanexample - secret: - # This tells the helm chart to create the secret - create: true - ``` - -3. 
Install the helm chart into the cluster - - ``` - helm install traffic-manager charts/telepresence -n ambassador --create-namespace -f license-values.yaml - ``` - -Users will now be able to use preview intercepts with the -`--preview-url=false` flag. Even with the license key, preview URLs -cannot be used without enabling direct communication with Ambassador -Cloud, as Ambassador Cloud is essential to their operation. - -If using Helm to install the server-side components, see the chart's [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence) to learn how to configure the image registry and license secret. - -Have clients use the [skipLogin](../config/#cloud) key to ensure the cli knows it is operating in an -air-gapped environment. - -## Mutating Webhook - -By default, Telepresence updates the intercepted workload (Deployment, StatefulSet, ReplicaSet) -template to add the [Traffic Agent](../architecture/#traffic-agent) sidecar container and update the -port definitions. If you use GitOps workflows (with tools like ArgoCD) to automatically update your -cluster so that it reflects the desired state from an external Git repository, this behavior can make -your workload out of sync with that external desired state. - -To solve this issue, you can use Telepresence's Mutating Webhook alternative mechanism. Intercepted -workloads will then stay untouched and only the underlying pods will be modified to inject the Traffic -Agent sidecar container and update the port definitions. - -Simply add the `telepresence.getambassador.io/inject-traffic-agent: enabled` annotation to your -workload template's annotations: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ telepresence.getambassador.io/inject-traffic-agent: enabled - spec: - containers: -``` - -### Service Port Annotation - -A service port annotation can be added to the workload to make the Mutating Webhook select a specific port -in the service. This is necessary when the service has multiple ports. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-port: https - spec: - containers: -``` - -### Service Name Annotation - -A service name annotation can be added to the workload to make the Mutating Webhook select a specific Kubernetes service. -This is necessary when the workload is exposed by multiple services. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-name: my-service - spec: - containers: -``` - -### Note on Numeric Ports - -If the targetPort of your intercepted service is pointing at a port number, in addition to -injecting the Traffic Agent sidecar, Telepresence will also inject an initContainer that will -reconfigure the pod's firewall rules to redirect traffic to the Traffic Agent. - - -Note that this initContainer requires `NET_ADMIN` capabilities. -If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. - - - -This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. 
-To enable running as GID 7777 on a specific openshift namespace, run: -oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE - - -If you need to use numeric ports without the aforementioned capabilities, you can [manually install the agent](../intercepts/manual-agent) - -For example, the following service is using a numeric port, so Telepresence would inject an initContainer into it: -```yaml -apiVersion: v1 -kind: Service -metadata: - name: your-service -spec: - type: ClusterIP - selector: - service: your-service - ports: - - port: 80 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: your-service - labels: - service: your-service -spec: - replicas: 1 - selector: - matchLabels: - service: your-service - template: - metadata: - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled - labels: - service: your-service - spec: - containers: - - name: your-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 -``` diff --git a/docs/pre-release/reference/config.md b/docs/pre-release/reference/config.md deleted file mode 100644 index 6722bc93..00000000 --- a/docs/pre-release/reference/config.md +++ /dev/null @@ -1,285 +0,0 @@ -# Laptop-side configuration - -## Global Configuration -Telepresence uses a `config.yml` file to store and change certain global configuration values that will be used for all clusters you use Telepresence with. The location of this file varies based on your OS: - -* macOS: `$HOME/Library/Application Support/telepresence/config.yml` -* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` -* Windows: `%APPDATA%\telepresence\config.yml` - -For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence. - -### Values - -The config file currently supports values for the `timeouts`, `logLevels`, `images`, `cloud`, and `grpc` keys. - -Here is an example configuration to show you the conventions of how Telepresence is configured: -**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist** - -```yaml -timeouts: - agentInstall: 1m - intercept: 10s -logLevels: - userDaemon: debug -images: - registry: privateRepo # This overrides the default docker.io/datawire repo - agentImage: ambassador-telepresence-agent:1.8.0 # This overrides the agent image to inject when intercepting -cloud: - refreshMessages: 24h # Refresh messages from cloud every 24 hours instead of the default, which is 1 week. -grpc: - maxReceiveSize: 10Mi -telepresenceAPI: - port: 9980 -``` - -#### Timeouts - -Values for `timeouts` are all durations either as a number of seconds -or as a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings -can be fractional (`1.5h`) or combined (`2h45m`). 
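- -For example (the values here are only illustrative), the two notations can be mixed freely within the same `timeouts` block: - -```yaml -timeouts: -  agentInstall: 120   # a plain number of seconds -  intercept: 0.5m     # a duration string; forms such as 1.5h or 2h45m also work -```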
- -These are the valid fields for the `timeouts` key: - -| Field | Description | Type | Default | -|-------------------------|------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|------------| -| `agentInstall` | Waiting for Traffic Agent to be installed | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | -| `apply` | Waiting for a Kubernetes manifest to be applied | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 1 minute | -| `clusterConnect` | Waiting for cluster to be connected | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | -| `intercept` | Waiting for an intercept to become active | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | -| `proxyDial` | Waiting for an outbound connection to be established | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | -| `trafficManagerConnect` | Waiting for the Traffic Manager API to connect for port forwards | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | -| `trafficManagerAPI` | Waiting for connection to the gRPC API after `trafficManagerConnect` is successful | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 15 seconds | -| `helm` | Waiting for Helm operations (e.g. `install`) on the Traffic Manager | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | - -#### Log Levels - -Values for the `logLevels` fields are one of the following strings, -case insensitive: - - - `trace` - - `debug` - - `info` - - `warning` or `warn` - - `error` - - `fatal` - - `panic` - -For whichever log-level you select, you will get logs labeled with that level and of higher severity -(e.g. if you use `info`, you will also get logs labeled `error`, but you will NOT get logs labeled `debug`). - -These are the valid fields for the `logLevels` key: - -| Field | Description | Type | Default | -|--------------|---------------------------------------------------------------------|---------------------------------------------|---------| -| `userDaemon` | Logging level to be used by the User Daemon (logs to connector.log) | [loglevel][logrus-level] [string][yaml-str] | debug | -| `rootDaemon` | Logging level to be used for the Root Daemon (logs to daemon.log) | [loglevel][logrus-level] [string][yaml-str] | info | - -#### Images -Values for `images` are strings. These values affect the objects that are deployed in the cluster, -so it's important to ensure users have the same configuration. - -Additionally, you can deploy the server-side components with [Helm](../../install/helm), to prevent them -from being overridden by a client's config and use the [mutating-webhook](../cluster-config/#mutating-webhook) -to handle installation of the `traffic-agents`.
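- -As a sketch, pointing both the client-side install and the webhook-based install at a private registry could look like the following; the registry name is a placeholder, and both fields are described in the table below: - -```yaml -images: -  registry: registry.example.com/telepresence -  webhookRegistry: registry.example.com/telepresence -```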
- -These are the valid fields for the `images` key: - -| Field | Description | Type | Default | -|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------|----------------------| -| `registry` | Docker registry to be used for installing the Traffic Manager and default Traffic Agent. If not using a helm chart to deploy server-side objects, changing this value will create a new traffic-manager deployment when using Telepresence commands. Additionally, changing this value will update installed default `traffic-agents` to use the new registry when creating a new intercept. | Docker registry name [string][yaml-str] | `docker.io/datawire` | -| `agentImage` | `$registry/$imageName:$imageTag` to use when installing the Traffic Agent. Changing this value will update pre-existing `traffic-agents` to use this new image. *The `registry` value is not used for the `traffic-agent` if you have this value set.* | qualified Docker image name [string][yaml-str] | (unset) | -| `webhookRegistry` | The container `$registry` that the [Traffic Manager](../cluster-config/#mutating-webhook) will use with the `webhookAgentImage` *This value is only used if a new `traffic-manager` is deployed* | Docker registry name [string][yaml-str] | `docker.io/datawire` | -| `webhookAgentImage` | The container image that the [Traffic Manager](../cluster-config/#mutating-webhook) will pull from the `webhookRegistry` when installing the Traffic Agent in annotated pods *This value is only used if a new `traffic-manager` is deployed* | non-qualified Docker image name [string][yaml-str] | (unset) | - -#### Cloud -Values for `cloud` are listed below and their type varies, so please see the chart for the expected type for each config value. -These fields control how the client interacts with the Cloud service. - -| Field | Description | Type | Default | -|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------|---------| -| `skipLogin` | Whether the CLI should skip automatic login to Ambassador Cloud. If set to true, in order to perform personal intercepts you must have a [license key](../cluster-config/#air-gapped-cluster) installed in the cluster. | [bool][yaml-bool] | false | -| `refreshMessages` | How frequently the CLI should communicate with Ambassador Cloud to get new command messages, which also resets whether the message has been raised or not. 
You will see each message at most once within the duration given by this config | [duration][go-duration] [string][yaml-str] | 168h | -| `systemaHost` | The host used to communicate with Ambassador Cloud | [string][yaml-str] | app.getambassador.io | -| `systemaPort` | The port used with `systemaHost` to communicate with Ambassador Cloud | [string][yaml-str] | 443 | - -Telepresence attempts to auto-detect if the cluster is capable of -communication with Ambassador Cloud, but may still prompt you to log -in in cases where only the on-laptop client wishes to communicate with -Ambassador Cloud. If you want those auto-login points to be disabled -as well, or would like it to not attempt to communicate with -Ambassador Cloud at all (even for the auto-detection), then be sure to -set the `skipLogin` value to `true`. - -Reminder: To use personal intercepts, which normally require a login, -you must have a license key in your cluster and specify which -`agentImage` should be installed by also adding the following to your -`config.yml`: - -```yaml -images: - agentImage: / -``` - -#### Grpc -The `maxReceiveSize` determines how large a message that the workstation receives via gRPC can be. The default is 4Mi (determined by gRPC). All traffic to and from the cluster is tunneled via gRPC. - -The size is measured in bytes. You can express it as a plain integer or as a fixed-point number using E, G, M, or K. You can also use the power-of-two equivalents: Gi, Mi, Ki. For example, the following represent roughly the same value: -``` -128974848, 129e6, 129M, 123Mi -``` - -#### RESTful API server -The `telepresenceAPI` controls the behavior of Telepresence's RESTful API server that can be queried for additional information about ongoing intercepts. When present, and the `port` is set to a valid port number, it's propagated to the auto-installer so that application containers that can be intercepted gets the `TELEPRESENCE_API_PORT` environment set. The server can then be queried at `localhost:`. In addition, the `traffic-agent` and the `user-daemon` on the workstation that performs an intercept will start the server on that port. -If the `traffic-manager` is auto-installed, its webhook agent injector will be configured to add the `TELEPRESENCE_API_PORT` environment to the app container when the `traffic-agent` is injected. -See [RESTful API server](../restapi) for more info. - -#### Daemons - -`daemons` controls which binary to use for the user daemon. By default it will -use the Telepresence binary. For example, this can be used to tell Telepresence to -use the Telepresence Pro binary. - -| Field | Description | Type | Default | -|--------------------|-------------------------------------------------------------|--------------------|--------------------------------------| -| `userDaemonBinary` | The path to the binary you want to use for the User Daemon. | [string][yaml-str] | The path to Telepresence executable | - - -## Per-Cluster Configuration -Some configuration is not global to Telepresence and is actually specific to a cluster. Thus, we store that config information in your kubeconfig file, so that it is easier to maintain per-cluster configuration. - -### Values -The current per-cluster configuration supports `dns`, `alsoProxy`, and `manager` keys. 
-To add configuration, simply add a `telepresence.io` entry to the cluster in your kubeconfig like so: - -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - also-proxy: - manager: - name: example-cluster -``` -#### DNS -The fields for `dns` are: local-ip, remote-ip, exclude-suffixes, include-suffixes, and lookup-timeout. - -| Field | Description | Type | Default | -|--------------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------|-----------------------------------------------------------------------------| -| `local-ip` | The address of the local DNS server. This entry is only used on Linux systems that are not configured to use systemd-resolved. | IP address [string][yaml-str] | first `nameserver` mentioned in `/etc/resolv.conf` | -| `remote-ip` | The address of the cluster's DNS service. | IP address [string][yaml-str] | IP of the `kube-dns.kube-system` or the `dns-default.openshift-dns` service | -| `exclude-suffixes` | Suffixes for which the DNS resolver will always fail (or fallback in case of the overriding resolver) | [sequence][yaml-seq] of [strings][yaml-str] | `[".arpa", ".com", ".io", ".net", ".org", ".ru"]` | -| `include-suffixes` | Suffixes for which the DNS resolver will always attempt to do a lookup. Includes have higher priority than excludes. | [sequence][yaml-seq] of [strings][yaml-str] | `[]` | -| `lookup-timeout` | Maximum time to wait for a cluster side host lookup. | [duration][go-duration] [string][yaml-str] | 4 seconds | - -Here is an example kubeconfig: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - include-suffixes: - - .se - exclude-suffixes: - - .com - name: example-cluster -``` - - -#### AlsoProxy - -When using `also-proxy`, you provide a list of subnets after the key in your kubeconfig file to be added to the TUN device. -All connections to addresses that the subnet spans will be dispatched to the cluster - -Here is an example kubeconfig for the subnet `1.2.3.4/32`: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - also-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -#### NeverProxy - -When using `never-proxy` you provide a list of subnets after the key in your kubeconfig file. These will never be routed via the -TUN device, even if they fall within the subnets (pod or service) for the cluster. Instead, whatever route they have before -telepresence connects is the route they will keep. - -Here is an example kubeconfig for the subnet `1.2.3.4/32`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - never-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -##### Using AlsoProxy together with NeverProxy - -Never proxy and also proxy are implemented as routing rules, meaning that when the two conflict, regular routing routes apply. -Usually this means that the most specific route will win. - -So, for example, if an `also-proxy` subnet falls within a broader `never-proxy` subnet: - -```yaml -never-proxy: [10.0.0.0/16] -also-proxy: [10.0.5.0/24] -``` - -Then the specific `also-proxy` of `10.0.5.0/24` will be proxied by the TUN device, whereas the rest of `10.0.0.0/16` will not. 
- -Conversely if a `never-proxy` subnet is inside a larger `also-proxy` subnet: - -```yaml -also-proxy: [10.0.0.0/16] -never-proxy: [10.0.5.0/24] -``` - -Then all of the also-proxy of `10.0.0.0/16` will be proxied, with the exception of the specific `never-proxy` of `10.0.5.0/24` - -#### Manager - -The `manager` key contains configuration for finding the `traffic-manager` that telepresence will connect to. It supports one key, `namespace`, indicating the namespace where the traffic manager is to be found - -Here is an example kubeconfig that will instruct telepresence to connect to a manager in namespace `staging`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` - -[yaml-bool]: https://yaml.org/type/bool.html -[yaml-float]: https://yaml.org/type/float.html -[yaml-int]: https://yaml.org/type/int.html -[yaml-seq]: https://yaml.org/type/seq.html -[yaml-str]: https://yaml.org/type/str.html -[go-duration]: https://pkg.go.dev/time#ParseDuration -[logrus-level]: https://github.com/sirupsen/logrus/blob/v1.8.1/logrus.go#L25-L45 diff --git a/docs/pre-release/reference/dns.md b/docs/pre-release/reference/dns.md deleted file mode 100644 index e38fbc61..00000000 --- a/docs/pre-release/reference/dns.md +++ /dev/null @@ -1,75 +0,0 @@ -# DNS resolution - -The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces of currently active intercepts. Processes running locally on the desktop will have network access to all services in the such namespaces by service-name only. - -All intercepts contribute to the DNS resolver, even those that do not use the `--namespace=` option. This is because `--namespace default` is implied, and in this context, `default` is treated just like any other namespace. - -No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `` only. Without an active intercept, the namespace qualified DNS name must be used (in the form `.`). - -See this demonstrated below, using the [quick start's](../../quick-start/) sample app services. - -No intercepts are currently running, we'll connect to the cluster and list the services that can be intercepted. - -``` -$ telepresence connect - - Connecting to traffic manager... - Connected to context default (https://) - -$ telepresence list - - web-app-5d568ccc6b : ready to intercept (traffic-agent not yet installed) - emoji : ready to intercept (traffic-agent not yet installed) - web : ready to intercept (traffic-agent not yet installed) - web-app-5d568ccc6b : ready to intercept (traffic-agent not yet installed) - -$ curl web-app:80 - - curl: (6) Could not resolve host: web-app - -``` - -This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace. - -``` -$ curl web-app.emojivoto:80 - - - - - - Emoji Vote - ... -``` - -Using the namespaced qualified DNS name though does work. -Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified. 
- -``` -$ telepresence intercept web --port 8080 - - Using Deployment web - intercepted - Intercept name : web - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Volume Mount Point: /tmp/telfs-166119801 - Intercepting : HTTP requests that match all headers: - 'x-telepresence-intercept-id: 8eac04e3-bf24-4d62-b3ba-35297c16f5cd:web' - -$ curl webapp:80 - - - - - - Emoji Vote - ... -``` - -Now curling that service by its short name works, and it will continue to work as long as the intercept is active. - -The DNS resolver will always be able to resolve services using `.` regardless of intercepts. - -See [Outbound connectivity](../routing/#dns-resolution) for details on DNS lookups. diff --git a/docs/pre-release/reference/docker-run.md b/docs/pre-release/reference/docker-run.md deleted file mode 100644 index 2262f0a5..00000000 --- a/docs/pre-release/reference/docker-run.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -Description: "How a Telepresence intercept can run a Docker container with configured environment and volume mounts." ---- - -# Using Docker for intercepts - -If you want your intercept to go to a Docker container on your laptop, use the `--docker-run` option. It creates the intercept, runs your container in the foreground, then automatically ends the intercept when the container exits. - -`telepresence intercept --port --docker-run -- ` - -The `--` separates flags intended for `telepresence intercept` from flags intended for `docker run`. - -## Example - -Imagine you are working on a new version of your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container. - -`telepresence intercept frontend-v1 --port 8000 --docker-run -- frontend-v2` - -## Ports - -The `--port` flag can specify an additional port when `--docker-run` is used so that the local and container port can be different. This is done using `--port :`. The container port will default to the local port when using the `--port ` syntax. - -## Flags - -Telepresence will automatically pass some relevant flags to Docker in order to connect the container with the intercept. Those flags are combined with the arguments given after `--` on the command line. - -- `--dns-search tel2-search` Enables single label name lookups in intercepted namespaces -- `--env-file ` Loads the intercepted environment -- `--name intercept--` Names the Docker container; this flag is omitted if explicitly given on the command line -- `-p ` The local port for the intercept and the container port -- `-v ` Volume mount specification, see CLI help for `--mount` and `--docker-mount` flags for more info diff --git a/docs/pre-release/reference/environment.md b/docs/pre-release/reference/environment.md deleted file mode 100644 index 7f83ff11..00000000 --- a/docs/pre-release/reference/environment.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." ---- - -# Environment variables - -Telepresence can import environment variables from the cluster pod when running an intercept. -You can then use these variables with the code of the intercepted service that you run on your laptop. - -There are three options available to do this: - -1.
`telepresence intercept [service] --port [port] --env-file=FILENAME` - - This will write the environment variables to a Docker Compose `.env` file. This file can be used with `docker-compose` when starting containers locally. Please see the Docker documentation regarding the [file syntax](https://docs.docker.com/compose/env-file/) and [usage](https://docs.docker.com/compose/environment-variables/) for more information. - -2. `telepresence intercept [service] --port [port] --env-json=FILENAME` - - This will write the environment variables to a JSON file. This file can be injected into other build processes. - -3. `telepresence intercept [service] --port [port] -- [COMMAND]` - - This will run a command locally with the pod's environment variables set on your laptop. Once the command quits the intercept is stopped (as if `telepresence leave [service]` was run). This can be used in conjunction with a local server command, such as `python [FILENAME]` or `node [FILENAME]` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. - - Another use would be running a subshell, Bash for example: - - `telepresence intercept [service] --port [port] -- /bin/bash` - - This would start the intercept then launch the subshell on your laptop with all the same variables set as on the pod. - -## Telepresence Environment Variables - -Telepresence adds some useful environment variables in addition to the ones imported from the intercepted pod: - -### TELEPRESENCE_ROOT -Directory where all remote volumes mounts are rooted. See [Volume Mounts](../volume/) for more info. - -### TELEPRESENCE_MOUNTS -Colon separated list of remotely mounted directories. - -### TELEPRESENCE_CONTAINER -The name of the intercepted container. Useful when a pod has several containers, and you want to know which one that was intercepted by Telepresence. - -### TELEPRESENCE_INTERCEPT_ID -ID of the intercept (same as the "x-intercept-id" http header). - -Useful if you need special behavior when intercepting a pod. One example might be when dealing with pub/sub systems like Kafka, where all processes that don't have the `TELEPRESENCE_INTERCEPT_ID` set can filter out all messages that contain an `x-intercept-id` header, while those that do, instead filter based on a matching `x-intercept-id` header. This is to assure that messages belonging to a certain intercept always are consumed by the intercepting process. diff --git a/docs/pre-release/reference/inside-container.md b/docs/pre-release/reference/inside-container.md deleted file mode 100644 index f83ef357..00000000 --- a/docs/pre-release/reference/inside-container.md +++ /dev/null @@ -1,37 +0,0 @@ -# Running Telepresence inside a container - -It is sometimes desirable to run Telepresence inside a container. One reason can be to avoid any side effects on the workstation's network, another can be to establish multiple sessions with the traffic manager, or even work with different clusters simultaneously. - -## Building the container - -Building a container with a ready-to-run Telepresence is easy because there are relatively few external dependencies. 
Add the following to a `Dockerfile`: - -```Dockerfile -# Dockerfile with telepresence and its prerequisites -FROM alpine:3.13 - -# Install Telepresence prerequisites -RUN apk add --no-cache curl iproute2 sshfs - -# Download and install the telepresence binary -RUN curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence -o telepresence && \ - install -o root -g root -m 0755 telepresence /usr/local/bin/telepresence -``` -In order to build the container, do this in the same directory as the `Dockerfile`: -``` -$ docker build -t tp-in-docker . -``` - -## Running the container - -Telepresence will need access to the `/dev/net/tun` device on your Linux host (or, in case the host isn't Linux, the Linux VM that Docker starts automatically), and a Kubernetes config that identifies the cluster. It will also need `--cap-add=NET_ADMIN` to create its Virtual Network Interface. - -The command to run the container can look like this: -```bash -$ docker run \ - --cap-add=NET_ADMIN \ - --device /dev/net/tun:/dev/net/tun \ - --network=host \ - -v ~/.kube/config:/root/.kube/config \ - -it --rm tp-in-docker -``` diff --git a/docs/pre-release/reference/intercepts/index.md b/docs/pre-release/reference/intercepts/index.md deleted file mode 100644 index 20b0094d..00000000 --- a/docs/pre-release/reference/intercepts/index.md +++ /dev/null @@ -1,354 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Intercepts - -When intercepting a service, Telepresence installs a *traffic-agent* -sidecar into the workload. That traffic-agent supports one or more -intercept *mechanisms* that it uses to decide which traffic to -intercept. Telepresence has a simple default traffic-agent; however, -you can configure a different traffic-agent with more sophisticated -mechanisms either by setting the [`images.agentImage` field in -`config.yml`](../config/#images) or by writing an -[`extensions/${extension}.yml`][extensions] file that tells -Telepresence about a traffic-agent that it can use, what mechanisms -that traffic-agent supports, and command-line flags to expose to the -user to configure that mechanism. You may tell Telepresence which -known mechanism to use with the `--mechanism=${mechanism}` flag or by -setting one of the `--${mechanism}-XXX` flags, which implicitly set -the mechanism; for example, setting `--http-match=auto` implicitly -sets `--mechanism=http`. - -The default open-source traffic-agent only supports the `tcp` -mechanism, which treats the raw layer 4 TCP streams as opaque and -sends all of that traffic down to the developer's workstation. This -means that it is a "global" intercept, affecting all users of the -cluster. - -In addition to the default open-source traffic-agent, Telepresence -already knows about the Ambassador Cloud -[traffic-agent][ambassador-agent], which supports the `http` -mechanism. The `http` mechanism operates at a higher layer, working -with layer 7 HTTP, and may intercept specific HTTP requests, allowing -other HTTP requests through to the regular service. This allows for -"personal" intercepts which only intercept traffic tagged as belonging -to a given developer.
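- -As a sketch (the workload name and port are placeholders), the two mechanisms described above could be requested explicitly like this: - -```shell -# Global intercept using the default open-source traffic-agent -telepresence intercept example-workload --port 8080 --mechanism=tcp - -# Personal intercept; --http-match=auto implies --mechanism=http -telepresence intercept example-workload --port 8080 --http-match=auto -```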
- -[extensions]: https://pkg.go.dev/github.com/telepresenceio/telepresence/v2@v$version$/pkg/client/cli/extensions -[ambassador-agent]: https://github.com/telepresenceio/telepresence/blob/release/v2/pkg/client/cli/extensions/builtin.go#L30-L50 - -## Intercept behavior when logged in to Ambassador Cloud - -Logging in to Ambassador Cloud (with [`telepresence -login`](../client/login/)) changes the Telepresence defaults in two -ways. - -First, being logged in to Ambassador Cloud causes Telepresence to -default to `--mechanism=http --http-match=auto --http-path-prefix=/` -(`--mechanism=http` is redundant; it is implied by the other `--http-xxx` flags). -If you hadn't been logged in it would have defaulted to -`--mechanism=tcp`. This tells Telepresence to use the Ambassador -Cloud traffic-agent to do smart "personal" intercepts and only -intercept a subset of HTTP requests, rather than just intercepting the -entirety of all TCP connections. This is important for working in a -shared cluster with teammates, and is important for the preview URL -functionality below. See `telepresence intercept --help` for -information on using the `--http-match` and `--http-path-xxx` flags to -customize which requests are intercepted. - -Secondly, being logged in causes Telepresence to default to -`--preview-url=true`. If you hadn't been logged in it would have -defaulted to `--preview-url=false`. This tells Telepresence to take -advantage of Ambassador Cloud to create a preview URL for this -intercept, creating a shareable URL that automatically sets the -appropriate headers to have requests coming from the preview URL be -intercepted. In order to create the preview URL, it will prompt you -for four settings about how your cluster's ingress is configured. For -each, Telepresence tries to intelligently detect the correct value for -your cluster; if it detects it correctly, you may simply press "enter" and -accept the default; otherwise you must tell Telepresence the correct -value. - -When creating an intercept with the `http` mechanism, the -traffic-agent sends a `GET /telepresence-http2-check` request to your -service and to the process running on your local machine at the port -specified in your intercept, in order to determine if they support -HTTP/2. This is required for the intercepts to behave correctly. If -you do not have a service running locally when the intercept is -created, the traffic-agent will use the result it got from checking -the in-cluster service. - -## Supported workloads - -Kubernetes has various -[workloads](https://kubernetes.io/docs/concepts/workloads/). -Currently, Telepresence supports intercepting (installing a -traffic-agent on) `Deployments`, `ReplicaSets`, and `StatefulSets`. - - - -While many of our examples use Deployments, they would also work on -ReplicaSets and StatefulSets. - - - -## Specifying a namespace for an intercept - -The namespace of the intercepted workload is specified using the -`--namespace` option. When this option is used, and `--workload` is -not used, then the given name is interpreted as the name of the -workload and the name of the intercept will be constructed from that -name and the namespace. - -```shell -telepresence intercept hello --namespace myns --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept -`hello-myns`. In order to remove the intercept, you will need to run -`telepresence leave hello-myns` instead of just `telepresence leave -hello`.
- -The name of the intercept will be left unchanged if the workload is specified. - -```shell -telepresence intercept myhello --namespace myns --workload hello --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept `myhello`. - -## Importing environment variables - -Telepresence can import the environment variables from the pod that is -being intercepted, see [this doc](../environment/) for more details. - -## Creating an intercept without a preview URL - -If you *are not* logged in to Ambassador Cloud, the following command -will intercept all traffic bound to the service and proxy it to your -laptop. This includes traffic coming through your ingress controller, -so use this option carefully as to not disrupt production -environments. - -```shell -telepresence intercept --port= -``` - -If you *are* logged in to Ambassador Cloud, setting the -`--preview-url` flag to `false` is necessary. - -```shell -telepresence intercept --port= --preview-url=false -``` - -This will output an HTTP header that you can set on your request for -that traffic to be intercepted: - -```console -$ telepresence intercept --port= --preview-url=false -Using Deployment -intercepted - Intercept name: - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -```console -$ telepresence status -Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" -User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 1 total - dataprocessingnodeservice: @ -``` - -Finally, run `telepresence leave ` to stop the intercept. - -## Skipping the ingress dialogue - -You can skip the ingress dialogue by setting the relevant parameters using flags. If any of the following flags are set, the dialogue will be skipped and the flag values will be used instead. If any of the required flags are missing, an error will be thrown. - -| Flag | Description | Required | -|------------------|------------------------------------------------------------------|------------| -| `--ingress-host` | The ip address for the ingress | yes | -| `--ingress-port` | The port for the ingress | yes | -| `--ingress-tls` | Whether tls should be used | no | -| `--ingress-l5` | Whether a different ip address should be used in request headers | no | - -## Creating an intercept when a service has multiple ports - -If you are trying to intercept a service that has multiple ports, you -need to tell Telepresence which service port you are trying to -intercept. To specify, you can either use the name of the service -port or the port number itself. To see which options might be -available to you and your service, use kubectl to describe your -service or look in the object's YAML. For more information on multiple -ports, see the [Kubernetes documentation][kube-multi-port-services]. 
- -[kube-multi-port-services]: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - -```console -$ telepresence intercept --port=: -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -When intercepting a service that has multiple ports, the name of the -service port that has been intercepted is also listed. - -If you want to change which port has been intercepted, you can create -a new intercept the same way you did above and it will change which -service port is being intercepted. - -## Creating an intercept When multiple services match your workload - -Oftentimes, there's a 1-to-1 relationship between a service and a -workload, so telepresence is able to auto-detect which service it -should intercept based on the workload you are trying to intercept. -But if you use something like -[Argo](https://www.getambassador.io/docs/argo/latest/), there may be -two services (that use the same labels) to manage traffic between a -canary and a stable service. - -Fortunately, if you know which service you want to use when -intercepting a workload, you can use the `--service` flag. So in the -aforementioned example, if you wanted to use the `echo-stable` service -when intercepting your workload, your command would look like this: - -```console -$ telepresence intercept echo-rollout- --port --service echo-stable -Using ReplicaSet echo-rollout- -intercepted - Intercept name : echo-rollout- - State : ACTIVE - Workload kind : ReplicaSet - Destination : 127.0.0.1:3000 - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 - Intercepting : all TCP connections -``` - -## Port-forwarding an intercepted container's sidecars - -Sidecars are containers that sit in the same pod as an application -container; they usually provide auxiliary functionality to an -application, and can usually be reached at -`localhost:${SIDECAR_PORT}`. For example, a common use case for a -sidecar is to proxy requests to a database, your application would -connect to `localhost:${SIDECAR_PORT}`, and the sidecar would then -connect to the database, perhaps augmenting the connection with TLS or -authentication. - -When intercepting a container that uses sidecars, you might want those -sidecars' ports to be available to your local application at -`localhost:${SIDECAR_PORT}`, exactly as they would be if running -in-cluster. Telepresence's `--to-pod ${PORT}` flag implements this -behavior, adding port-forwards for the port given. - -```console -$ telepresence intercept --port=: --to-pod= -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -If there are multiple ports that you need forwarded, simply repeat the -flag (`--to-pod= --to-pod=`). - -## Intercepting headless services - -Kubernetes supports creating [services without a ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services), -which, when they have a pod selector, serve to provide a DNS record that will directly point to the service's backing pods. -Telepresence supports intercepting these `headless` services as it would a regular service with a ClusterIP. 
-So, for example, if you have the following service: - -```yaml ---- -apiVersion: v1 -kind: Service -metadata: - name: my-headless -spec: - type: ClusterIP - clusterIP: None - selector: - service: my-headless - ports: - - port: 8080 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: my-headless - labels: - service: my-headless -spec: - replicas: 1 - serviceName: my-headless - selector: - matchLabels: - service: my-headless - template: - metadata: - labels: - service: my-headless - spec: - containers: - - name: my-headless - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -``` - -You can intercept it like any other: - -```console -$ telepresence intercept my-headless --port 8080 -Using StatefulSet my-headless -intercepted - Intercept name : my-headless - State : ACTIVE - Workload kind : StatefulSet - Destination : 127.0.0.1:8080 - Volume Mount Point: /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-524189712 - Intercepting : all TCP connections -``` - - -This utilizes an initContainer that requires `NET_ADMIN` capabilities. -If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. - - - -This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. -To enable running as GID 7777 on a specific openshift namespace, run: -oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE - - - -Intercepting headless services without a selector is not supported. - diff --git a/docs/pre-release/reference/intercepts/manual-agent.md b/docs/pre-release/reference/intercepts/manual-agent.md deleted file mode 100644 index e818171c..00000000 --- a/docs/pre-release/reference/intercepts/manual-agent.md +++ /dev/null @@ -1,221 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Manually injecting the Traffic Agent - -You can directly modify your workload's YAML configuration to add the Telepresence Traffic Agent and enable it to be intercepted. - -When you use a Telepresence intercept, Telepresence automatically edits the workload and services when you use -`telepresence uninstall --agent `. In some GitOps workflows, you may need to use the -[Telepresence Mutating Webhook](../../cluster-config/#mutating-webhook) to keep intercepted workloads unmodified -while you target changes on specific pods. - - -In situations where you don't have access to the proper permissions for numeric ports, as noted in the Note on numeric ports -section of the documentation, it is possible to manually inject the Traffic Agent. Because this is not the recommended approach -to making a workload interceptable, try the Mutating Webhook before proceeding." - - -## Procedure - -You can manually inject the agent into Deployments, StatefulSets, or ReplicaSets. The example on this page -uses the following Deployment: - - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "my-service" - labels: - service: my-service -spec: - replicas: 1 - selector: - matchLabels: - service: my-service - template: - metadata: - labels: - service: my-service - spec: - containers: - - name: echo-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -``` - -The deployment is being exposed by the following service: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: "my-service" -spec: - type: ClusterIP - selector: - service: my-service - ports: - - port: 80 - targetPort: 8080 -``` - -### 1. 
Generating the YAML - -First, generate the YAML for the traffic-agent container: - -```console -$ telepresence genyaml container --container-name echo-container --port 8080 --output - --input deployment.yaml -args: -- agent -env: -- name: TELEPRESENCE_CONTAINER - value: echo-container -- name: _TEL_AGENT_LOG_LEVEL - value: info -- name: _TEL_AGENT_NAME - value: my-service -- name: _TEL_AGENT_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -- name: _TEL_AGENT_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP -- name: _TEL_AGENT_APP_PORT - value: "8080" -- name: _TEL_AGENT_AGENT_PORT - value: "9900" -- name: _TEL_AGENT_MANAGER_HOST - value: traffic-manager.ambassador -image: docker.io/datawire/tel2:2.4.6 -name: traffic-agent -ports: -- containerPort: 9900 - protocol: TCP -readinessProbe: - exec: - command: - - /bin/stat - - /tmp/agent/ready -resources: {} -volumeMounts: -- mountPath: /tel_pod_info - name: traffic-annotations -``` - -Next, generate the YAML for the volume: - -```console -$ telepresence genyaml volume --output - --input deployment.yaml -downwardAPI: - items: - - fieldRef: - fieldPath: metadata.annotations - path: annotations -name: traffic-annotations -``` - - -Enter `telepresence genyaml container --help` or `telepresence genyaml volume --help` for more information about these flags. - - -### 2. Injecting the YAML into the Deployment - -You need to add the `Deployment` YAML you genereated to include the container and the volume. These are placed as elements of `spec.template.spec.containers` and `spec.template.spec.volumes` respectively. -You also need to modify `spec.template.metadata.annotations` and add the annotation `telepresence.getambassador.io/manually-injected: "true"`. -These changes should look like the following: - -```diff -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "my-service" - labels: - service: my-service -spec: - replicas: 1 - selector: - matchLabels: - service: my-service - template: - metadata: - labels: - service: my-service -+ annotations: -+ telepresence.getambassador.io/manually-injected: "true" - spec: - containers: - - name: echo-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -+ - args: -+ - agent -+ env: -+ - name: TELEPRESENCE_CONTAINER -+ value: echo-container -+ - name: _TEL_AGENT_LOG_LEVEL -+ value: info -+ - name: _TEL_AGENT_NAME -+ value: my-service -+ - name: _TEL_AGENT_NAMESPACE -+ valueFrom: -+ fieldRef: -+ fieldPath: metadata.namespace -+ - name: _TEL_AGENT_POD_IP -+ valueFrom: -+ fieldRef: -+ fieldPath: status.podIP -+ - name: _TEL_AGENT_APP_PORT -+ value: "8080" -+ - name: _TEL_AGENT_AGENT_PORT -+ value: "9900" -+ - name: _TEL_AGENT_MANAGER_HOST -+ value: traffic-manager.ambassador -+ image: docker.io/datawire/tel2:2.4.6 -+ name: traffic-agent -+ ports: -+ - containerPort: 9900 -+ protocol: TCP -+ readinessProbe: -+ exec: -+ command: -+ - /bin/stat -+ - /tmp/agent/ready -+ resources: {} -+ volumeMounts: -+ - mountPath: /tel_pod_info -+ name: traffic-annotations -+ volumes: -+ - downwardAPI: -+ items: -+ - fieldRef: -+ fieldPath: metadata.annotations -+ path: annotations -+ name: traffic-annotations -``` - -### 3. Modifying the service - -Once the modified deployment YAML has been applied to the cluster, you need to modify the Service to route traffic to the Traffic Agent. -You can do this by changing the exposed `targetPort` to `9900`. 
The resulting service should look like: - -```diff -apiVersion: v1 -kind: Service -metadata: - name: "my-service" -spec: - type: ClusterIP - selector: - service: my-service - ports: - - port: 80 -- targetPort: 8080 -+ targetPort: 9900 -``` diff --git a/docs/pre-release/reference/linkerd.md b/docs/pre-release/reference/linkerd.md deleted file mode 100644 index 9b903fa7..00000000 --- a/docs/pre-release/reference/linkerd.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -Description: "How to get Linkerd meshed services working with Telepresence" ---- - -# Using Telepresence with Linkerd - -## Introduction -Getting started with Telepresence on Linkerd services is as simple as adding an annotation to your Deployment: - -```yaml -spec: - template: - metadata: - annotations: - config.linkerd.io/skip-outbound-ports: "8081" -``` - -The local system and the Traffic Agent connect to the Traffic Manager using its gRPC API on port 8081. Telling Linkerd to skip that port allows the Traffic Agent sidecar to fully communicate with the Traffic Manager, and therefore the rest of the Telepresence system. - -## Prerequisites -1. [Telepresence binary](../../install) -2. Linkerd control plane [installed to cluster](https://linkerd.io/2.10/tasks/install/) -3. Kubectl -4. [Working ingress controller](https://www.getambassador.io/docs/edge-stack/latest/howtos/linkerd2) - -## Deploy -Save and deploy the following YAML. Note the `config.linkerd.io/skip-outbound-ports` annotation in the metadata of the pod template. - -```yaml ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: quote -spec: - replicas: 1 - selector: - matchLabels: - app: quote - strategy: - type: RollingUpdate - template: - metadata: - annotations: - linkerd.io/inject: "enabled" - config.linkerd.io/skip-outbound-ports: "8081,8022,6001" - labels: - app: quote - spec: - containers: - - name: backend - image: docker.io/datawire/quote:0.4.1 - ports: - - name: http - containerPort: 8000 - env: - - name: PORT - value: "8000" - resources: - limits: - cpu: "0.1" - memory: 100Mi -``` - -## Connect to Telepresence -Run `telepresence connect` to connect to the cluster. Then `telepresence list` should show the `quote` deployment as `ready to intercept`: - -``` -$ telepresence list - - quote: ready to intercept (traffic-agent not yet installed) -``` - -## Run the intercept -Run `telepresence intercept quote --port 8080:80` to direct traffic from the `quote` deployment to port 8080 on your local system. Assuming you have something listening on 8080, you should now be able to see your local service whenever attempting to access the `quote` service. diff --git a/docs/pre-release/reference/rbac.md b/docs/pre-release/reference/rbac.md deleted file mode 100644 index 6c39739e..00000000 --- a/docs/pre-release/reference/rbac.md +++ /dev/null @@ -1,291 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Telepresence RBAC -The intention of this document is to provide a template for securing and limiting the permissions of Telepresence. -This documentation covers the full extent of permissions necessary to administrate Telepresence components in a cluster. - -There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described above. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [intercept](../../howtos/intercepts/), and otherwise be unable to affect Kubernetes resources. 
- -In addition to the above, there is also a consideration of how to manage Users and Groups in Kubernetes which is outside of the scope of the document. This document will use Service Accounts to assign Roles and Bindings. Other methods of RBAC administration and enforcement can be found on the [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) page. - -## Requirements - -- Kubernetes version 1.16+ -- Cluster admin privileges to apply RBAC - -## Editing your kubeconfig - -This guide also assumes that you are utilizing a kubeconfig file that is specified by the `KUBECONFIG` environment variable. This is a `yaml` file that contains the cluster's API endpoint information as well as the user data being supplied for authentication. The Service Account name used in the example below is called tp-user. This can be replaced by any value (i.e. John or Jane) as long as references to the Service Account are consistent throughout the `yaml`. After an administrator has applied the RBAC configuration, a user should create a `config.yaml` in your current directory that looks like the following:​ - -```yaml -apiVersion: v1 -kind: Config -clusters: -- name: my-cluster # Must match the cluster value in the contexts config - cluster: - ## The cluster field is highly cloud dependent. -contexts: -- name: my-context - context: - cluster: my-cluster # Must match the name field in the clusters config - user: tp-user -users: -- name: tp-user # Must match the name of the Service Account created by the cluster admin - user: - token: # See note below -``` - -The Service Account token will be obtained by the cluster administrator after they create the user's Service Account. Creating the Service Account will create an associated Secret in the same namespace with the format `-token-`. This token can be obtained by your cluster administrator by running `kubectl get secret -n ambassador -o jsonpath='{.data.token}' | base64 -d`. - -After creating `config.yaml` in your current directory, export the file's location to KUBECONFIG by running `export KUBECONFIG=$(pwd)/config.yaml`. You should then be able to switch to this context by running `kubectl config use-context my-context`. - -## Administrating Telepresence - -Telepresence administration requires permissions for creating `Namespaces`, `ServiceAccounts`, `ClusterRoles`, `ClusterRoleBindings`, `Secrets`, `Services`, `MutatingWebhookConfiguration`, and for creating the `traffic-manager` [deployment](../architecture/#traffic-manager) which is typically done by a full cluster administrator. 
The following permissions are needed for the installation and use of Telepresence: - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: telepresence-admin - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: telepresence-admin-role -rules: - - apiGroups: - - "" - resources: ["pods", "pods/log"] - verbs: ["get", "list", "create", "delete", "watch"] - - apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "update", "create", "delete"] - - apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] - - apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "create", "delete", "watch"] - - apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] - - apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list"] - - apiGroups: - - "rbac.authorization.k8s.io" - resources: ["clusterroles", "clusterrolebindings", "roles", "rolebindings"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch", "create"] - - apiGroups: - - "" - resources: ["secrets"] - verbs: ["get", "create", "list", "delete"] - - apiGroups: - - "" - resources: ["serviceaccounts"] - verbs: ["get", "create", "delete"] - - apiGroups: - - "admissionregistration.k8s.io" - resources: ["mutatingwebhookconfigurations"] - verbs: ["get", "create", "delete"] - - apiGroups: - - "" - resources: ["nodes"] - verbs: ["list", "get", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: telepresence-clusterrolebinding -subjects: - - name: telepresence-admin - kind: ServiceAccount - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - name: telepresence-admin-role - kind: ClusterRole -``` - -There are two ways to install the traffic-manager: Using `telepresence connect` and installing the [helm chart](../../install/helm/). - -By using `telepresence connect`, Telepresence will use your kubeconfig to create the objects mentioned above in the cluster if they don't already exist. If you want the most introspection into what is being installed, we recommend using the helm chart to install the traffic-manager. - -## Cluster-wide telepresence user access - -To allow users to make intercepts across all namespaces, but with more limited `kubectl` permissions, the following `ServiceAccount`, `ClusterRole`, and `ClusterRoleBinding` will allow full `telepresence intercept` functionality. 
- -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate value - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods", "pods/log"] - verbs: ["get", "list", "create", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "update", "watch"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "patch", "watch"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list"] -- apiGroups: - - "rbac.authorization.k8s.io" - resources: ["clusterroles", "clusterrolebindings"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: telepresence-rolebinding -subjects: -- name: tp-user - kind: ServiceAccount - namespace: ambassador -roleRef: - apiGroup: rbac.authorization.k8s.io - name: telepresence-role - kind: ClusterRole -``` - -## Namespace only telepresence user access - -RBAC for multi-tenant scenarios where multiple dev teams are sharing a single cluster where users are constrained to a specific namespace(s). - -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate user name - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "watch"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] ---- -kind: RoleBinding # RBAC to access ambassador namespace -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: t2-ambassador-binding - namespace: ambassador -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding # RoleBinding T2 namespace to be intecpeted -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-test-binding # Update "test" for appropriate namespace to be intercepted - namespace: test # Update "test" for appropriate namespace to be intercepted -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: 
telepresence-role - apiGroup: rbac.authorization.k8s.io -​ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-role -rules: -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-binding -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-namespace-role - apiGroup: rbac.authorization.k8s.io -``` diff --git a/docs/pre-release/reference/restapi.md b/docs/pre-release/reference/restapi.md deleted file mode 100644 index 462abd19..00000000 --- a/docs/pre-release/reference/restapi.md +++ /dev/null @@ -1,93 +0,0 @@ -# Telepresence RESTful API server - -Telepresence can run a RESTful API server on the local host, both on the local workstation and in a pod that contains a `traffic-agent`. The server currently has two endpoints. The standard `healthz` endpoint and the `consume-here` endpoint. - -## Enabling the server -The server is enabled by setting the `telepresenceAPI.port` to a valid port number in the [Telepresence Helm Chart](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). The values may be passed explicitly to Helm during install, or configured using the [Telepresence Config](../config#restful-api-server) to impact an auto-install. - -## Querying the server -On the cluster's side, it's the `traffic-agent` of potentially intercepted pods that runs the server. The server can be accessed using `http://localhost:/` from the application container. Telepresence ensures that the container has the `TELEPRESENCE_API_PORT` environment variable set when the `traffic-agent` is installed. On the workstation, it is the `user-daemon` that runs the server. It uses the `TELEPRESENCE_API_PORT` that is conveyed in the environment of the intercept. This means that the server can be accessed the exact same way locally, provided that the environment is propagated correctly to the interceptor process. - -## Endpoints - -The `consume-here` and `intercept-info` endpoints are both intended to be queried with an optional path query and a set of headers, typically obtained from a Kafka message or similar. Telepresence provides the ID of the intercept in the environment variable [TELEPRESENCE_INTERCEPT_ID](../environment/#telepresence_intercept_id) during an intercept. This ID must be provided in a `x-telepresence-caller-intercept-id: = ` header. Telepresence needs this to identify the caller correctly. The `` will be empty when running in the cluster, but it's harmless to provide it there too, so there's no need for conditional code. - -There are three prerequisites to fulfill before testing The `consume-here` and `intercept-info` endpoints using `curl -v` on the workstation: -1. An intercept must be active -2. The "/healthz" endpoint must respond with OK -3. The ID of the intercept must be known. It will be visible as `ID` in the output of `telepresence list --debug`. - -### healthz -The `http://localhost:/healthz` endpoint should respond with status code 200 OK. If it doesn't then something isn't configured correctly. 
Check that the `traffic-agent` container is present and that the `TELEPRESENCE_API_PORT` has been added to the environment of the application container and/or in the environment that is propagated to the interceptor that runs on the local workstation. - -#### test endpoint using curl -A `curl -v` call can be used to test the endpoint when an intercept is active. This example assumes that the API port is configured to be 9980. -```console -$ curl -v localhost:9980/healthz -* Trying ::1:9980... -* Connected to localhost (::1) port 9980 (#0) -> GET /healthz HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.76.1 -> Accept: */* -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Date: Fri, 26 Nov 2021 07:06:18 GMT -< Content-Length: 0 -< -* Connection #0 to host localhost left intact -``` - -### consume-here -`http://localhost:/consume-here` will respond with "true" (consume the message) or "false" (leave the message on the queue). When running in the cluster, this endpoint will respond with `false` if the headers match an ongoing intercept for the same workload because it's assumed that it's up to the intercept to consume the message. When running locally, the response is inverted. Matching headers means that the message should be consumed. - -#### test endpoint using curl -Assuming that the API-server runs on port 9980, that the intercept was started with `--http-match x=y --http-path-prefix=/api`, we can now check that the "/consume-here" returns "true" for the path "/api" and given headers. -```console -$ curl -v localhost:9980/consume-here?path=/api -H 'x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest' -H 'x: y' -* Trying ::1:9980... -* Connected to localhost (::1) port 9980 (#0) -> GET /consume-here?path=/api HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.76.1 -> Accept: */* -> x: y -> x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Content-Type: application/json -< Date: Fri, 26 Nov 2021 06:43:28 GMT -< Content-Length: 4 -< -* Connection #0 to host localhost left intact -true -``` - -If you can run curl from the pod, you can try the exact same URL. The result should be "false" when there's an ongoing intercept. The `x-telepresence-caller-intercept-id` is not needed when the call is made from the pod. - -### intercept-info -`http://localhost:/intercept-info` is intended to be queried with an optional path query and a set of headers, typically obtained from a Kafka message or similar, and will respond with a JSON structure containing the two booleans `clientSide` and `intercepted`, and a `metadata` map which corresponds to the `--http-meta` key pairs used when the intercept was created. This field is always omitted in case `intercepted` is `false`. - -#### test endpoint using curl -Assuming that the API-server runs on port 9980, that the intercept was started with `--http-match x=y --http-path-prefix=/api --http-meta a=b --http-meta b=c`, we can now check that the "/intercept-info" returns information for the given path and headers. 
-```console -$ curl -v localhost:9980/intercept-info?path=/api -H 'x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest' -H 'x: y' -* Trying ::1:9980...* Connected to localhost (127.0.0.1) port 9980 (#0) -> GET /intercept-info?path=/api HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.79.1 -> Accept: */* -> x: y -> x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Content-Type: application/json -< Date: Tue, 01 Feb 2022 11:39:55 GMT -< Content-Length: 68 -< -{"intercepted":true,"clientSide":true,"metadata":{"a":"b","b":"c"}} -* Connection #0 to host localhost left intact -``` diff --git a/docs/pre-release/reference/routing.md b/docs/pre-release/reference/routing.md deleted file mode 100644 index 061ba8fa..00000000 --- a/docs/pre-release/reference/routing.md +++ /dev/null @@ -1,69 +0,0 @@ -# Connection Routing - -## Outbound - -### DNS resolution -When requesting a connection to a host, the IP of that host must be determined. Telepresence provides DNS resolvers to help with this task. There are currently four types of resolvers but only one of them will be used on a workstation at any given time. Common for all of them is that they will propagate a selection of the host lookups to be performed in the cluster. The selection normally includes all names ending with `.cluster.local` or a currently mapped namespace but more entries can be added to the list using the `include-suffixes` option in the -[local DNS configuration](../config/#dns) - -#### Cluster side DNS lookups -The cluster side host lookup will be performed by the traffic-manager unless the client has an active intercept, in which case, the agent performing that intercept will be responsible for doing it. If the client has multiple intercepts, then all of them will be asked to perform the lookup, and the response to the client will contain the unique sum of IPs that they produce. It's therefore important to never have multiple intercepts that span more than one namespace[[1](#namespacelimit)]. The reason for asking all of them is that the workstation currently impersonates multiple containers, and it is not possible to determine on behalf of what container the lookup request is made. - -#### macOS resolver -This resolver hooks into the macOS DNS system by creating files under `/etc/resolver`. Those files correspond to some domain and contain the port number of the Telepresence resolver. Telepresence creates one such file for each of the currently mapped namespaces and `include-suffixes` option. The file `telepresence.local` contains a search path that is configured based on current intercepts so that single label names can be resolved correctly. - -#### Linux systemd-resolved resolver -This resolver registers itself as part of telepresence's [VIF](../tun-device) using `systemd-resolved` and uses the DBus API to configure domains and routes that corresponds to the current set of intercepts and namespaces. - -#### Linux overriding resolver -Linux systems that aren't configured with `systemd-resolved` will use this resolver. A Typical case is when running Telepresence [inside a docker container](../inside-container). During initialization, the resolver will first establish a _fallback_ connection to the IP passed as `--dns`, the one configured as `local-ip` in the [local DNS configuration](../config/#dns), or the primary `nameserver` registered in `/etc/resolv.conf`. 
It will then use iptables to actually override that IP so that requests to it instead end up in the overriding resolver, which unless it succeeds on its own, will use the _fallback_. - -#### Windows resolver -This resolver uses the DNS resolution capabilities of the [win-tun](https://www.wintun.net/) device in conjunction with [Win32_NetworkAdapterConfiguration SetDNSDomain](https://docs.microsoft.com/en-us/powershell/scripting/samples/performing-networking-tasks?view=powershell-7.2#assigning-the-dns-domain-for-a-network-adapter). - -#### DNS caching -The Telepresence DNS resolver often changes its configuration. This means that Telepresence must either flush the DNS caches on the local host, or ensure that DNS-records returned from the Telepresence resolver aren't cached (or cached for a very short time). All operating systems have different ways of flushing the DNS caches and even different versions of one system may have differences. Also, on some systems it is necessary to actually kill and restart processes to ensure a proper flush, which in turn may result in network instabilities. - -Starting with 2.4.7, Telepresence will no longer flush the host's DNS caches. Instead, all records will have a short Time To Live (TTL) so that such caches evict the entries quickly. This causes increased load on the Telepresence resolver (shorter TTL means more frequent queries) and to cater for that, telepresence now has an internal cache to minimize the number of DNS queries that it sends to the cluster. This cache is flushed as needed without causing instabilities. - -### Routing - -#### Subnets -The Telepresence `traffic-manager` service is responsible for discovering the cluster's service subnet and all subnets used by the pods. In order to do this, it needs permission to create a dummy service[[2](#servicesubnet)] in its own namespace, and the ability to list, get, and watch nodes and pods. Most clusters will expose the pod subnets as `podCIDR` in the `Node` while others, like Amazon EKS, don't. Telepresence will then fall back to deriving the subnets from the IPs of all pods. If you'd like to choose a specific method for discovering subnets, or want to provide the list yourself, you can use the `podCIDRStrategy` configuration value in the [helm](../../install/helm) chart to do that. - -The complete set of subnets that the [VIF](../tun-device) will be configured with is dynamic and may change during a connection's life cycle as new nodes arrive or disappear from the cluster. The set consists of what that the traffic-manager finds in the cluster, and the subnets configured using the [also-proxy](../config#alsoproxy) configuration option. Telepresence will remove subnets that are equal to, or completely covered by, other subnets. - -#### Connection origin -A request to connect to an IP-address that belongs to one of the subnets of the [VIF](../tun-device) will cause a connection request to be made in the cluster. As with host name lookups, the request will originate from the traffic-manager unless the client has ongoing intercepts. If it does, one of the intercepted pods will be chosen, and the request will instead originate from that pod. This is a best-effort approach. Telepresence only knows that the request originated from the workstation. It cannot know that it is intended to originate from a specific pod when multiple intercepts are active. - -A `--local-only` intercept will not have any effect on the connection origin because there is no pod from which the connection can originate. 
The intercept must be made on a workload that has been deployed in the cluster if there's a requirement for correct connection origin. - -There are multiple reasons for doing this. One is that it is important that the request originates from the correct namespace. Example: - -```bash -curl some-host -``` -results in a http request with header `Host: some-host`. Now, if a service-mesh like Istio performs header based routing, then it will fail to find that host unless the request originates from the same namespace as the host resides in. Another reason is that the configuration of a service mesh can contain very strict rules. If the request then originates from the wrong pod, it will be denied. Only one intercept at a time can be used if there is a need to ensure that the chosen pod is exactly right. - -### Recursion detection -It is common that clusters used in development, such as Minikube, Minishift or k3s, run on the same host as the Telepresence client, often in a Docker container. Such clusters may have access to host network, which means that both DNS and L4 routing may be subjected to recursion. - -#### DNS recursion -When a local cluster's DNS-resolver fails to resolve a hostname, it may fall back to querying the local host network. This means that the Telepresence resolver will be asked to resolve a query that was issued from the cluster. Telepresence must check if such a query is recursive because there is a chance that it actually originated from the Telepresence DNS resolver and was dispatched to the `traffic-manager`, or a `traffic-agent`. - -Telepresence handles this by sending one initial DNS-query to resolve the hostname "tel2-recursion-check.kube-system". If the cluster runs locally, and has access to the local host's network, then that query will recurse back into the Telepresence resolver. Telepresence remembers this and alters its own behavior so that queries that are believed to be recursions are detected and respond with an NXNAME record. Telepresence performs this solution to the best of its ability, but may not be completely accurate in all situations. There's a chance that the DNS-resolver will yield a false negative for the second query if the same hostname is queried more than once in rapid succession, that is when the second query is made before the first query has received a response from the cluster. - -#### Connect recursion -A cluster running locally may dispatch connection attempts to non-existing host:port combinations to the host network. This means that they may reach the Telepresence [VIF](../tun-device). Endless recursions occur if the VIF simply dispatches such attempts on to the cluster. - -The telepresence client handles this by serializing all connection attempts to one specific IP:PORT, trapping all subsequent attempts to connect to that IP:PORT until the first attempt has completed. If the first attempt was deemed a success, then the currently trapped attempts are allowed to proceed. If the first attempt failed, then the currently trapped attempts fail. - -## Inbound - -The traffic-manager and traffic-agent are mutually responsible for setting up the necessary connection to the workstation when an intercept becomes active. In versions prior to 2.3.2, this would be accomplished by the traffic-manager creating a port dynamically that it would pass to the traffic-agent. The traffic-agent would then forward the intercepted connection to that port, and the traffic-manager would forward it to the workstation. 
This led to problems when integrating with service meshes like Istio, since those dynamic ports needed to be configured. It also imposed an undesired requirement to be able to use mTLS between the traffic-manager and traffic-agent.
-
-In 2.3.2, this changed so that the traffic-agent instead creates a tunnel to the traffic-manager using the already existing gRPC API connection. The traffic-manager then forwards that using another tunnel to the workstation. This is completely invisible to other service meshes and is therefore much easier to configure.
-
-##### Footnotes:
-<a name="namespacelimit"></a>
-1: A future version of Telepresence will not allow concurrent intercepts that span multiple namespaces.
-
-<a name="servicesubnet"></a>
-2: The error message from an attempt to create a service in a bad subnet contains the service subnet. The trick of creating a dummy service is currently the only way to get Kubernetes to expose that subnet.
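For reference, the `also-proxy` subnets mentioned in the Subnets section above are declared as a `telepresence.io` extension on the cluster entry of the kubeconfig, in the same shape as the `never-proxy` example that appears in the VPN reference later in this diff. A minimal sketch; the server address and the `10.128.0.0/16` value are purely illustrative:

```yaml
- cluster:
    server: https://127.0.0.1
    extensions:
    - name: telepresence.io
      extension:
        also-proxy:
        - 10.128.0.0/16
```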

diff --git a/docs/pre-release/reference/volume.md b/docs/pre-release/reference/volume.md
deleted file mode 100644
index 82df9caf..00000000
--- a/docs/pre-release/reference/volume.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Volume mounts
-
-import Alert from '@material-ui/lab/Alert';
-
-Telepresence supports locally mounting the volumes that are mounted to your Pods. You can specify a command to run when starting the intercept; this could be a subshell or a local server such as Python or Node.
-
-```
-telepresence intercept <service_name> --port <port> --mount=/tmp/ -- /bin/bash
-```
-
-In this case, Telepresence creates the intercept, mounts the Pod's volumes locally to `/tmp`, and starts a Bash subshell.
-
-Telepresence can set a random mount point for you by using `--mount=true` instead; you can then find the mount point in the output of `telepresence list` or using the `$TELEPRESENCE_ROOT` variable.
-
-```
-$ telepresence intercept <service_name> --port <port> --mount=true -- /bin/bash
-Using Deployment <service_name>
-intercepted
-    Intercept name    : <service_name>
-    State             : ACTIVE
-    Workload kind     : Deployment
-    Destination       : 127.0.0.1:<port>
-    Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784
-    Intercepting      : all TCP connections
-
-bash-3.2$ echo $TELEPRESENCE_ROOT
-/var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784
-```
-
-`--mount=true` is the default if a mount option is not specified; use `--mount=false` to disable mounting volumes.
-
-With either method, the paths used by the code you run locally, whether from the subshell or from the intercept command, will need to be prepended with the `$TELEPRESENCE_ROOT` environment variable in order to use the mounted volumes.
-
-For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`.
-
-If using `--mount=true` without a command, you can use either of the environment variable flags to retrieve the variable.
diff --git a/docs/pre-release/reference/vpn.md b/docs/pre-release/reference/vpn.md
deleted file mode 100644
index cb3f8acf..00000000
--- a/docs/pre-release/reference/vpn.md
+++ /dev/null
@@ -1,157 +0,0 @@
-
- -# Telepresence and VPNs - -## The test-vpn command - -You can make use of the `telepresence test-vpn` command to diagnose issues -with your VPN setup. -This guides you through a series of steps to figure out if there are -conflicts between your VPN configuration and telepresence. - -### Prerequisites - -Before running `telepresence test-vpn` you should ensure that your VPN is -in split-tunnel mode. -This means that only traffic that _must_ pass through the VPN is directed -through it; otherwise, the test results may be inaccurate. - -You may need to configure this on both the client and server sides. -Client-side, taking the Tunnelblick client as an example, you must ensure that -the `Route all IPv4 traffic through the VPN` tickbox is not enabled: - - - -Server-side, taking AWS' ClientVPN as an example, you simply have to enable -split-tunnel mode: - - - -In AWS, this setting can be toggled without reprovisioning the VPN. Other cloud providers may work differently. - -### Testing the VPN configuration - -To run it, enter: - -```console -$ telepresence test-vpn -``` - -The test-vpn tool begins by asking you to disconnect from your VPN; ensure you are disconnected then -press enter: - -``` -Telepresence Root Daemon is already stopped -Telepresence User Daemon is already stopped -Please disconnect from your VPN now and hit enter once you're disconnected... -``` - -Once it's gathered information about your network configuration without an active connection, -it will ask you to connect to the VPN: - -``` -Please connect to your VPN now and hit enter once you're connected... -``` - -It will then connect to the cluster: - - -``` -Launching Telepresence Root Daemon -Launching Telepresence User Daemon -Connected to context arn:aws:eks:us-east-1:914373874199:cluster/josec-tp-test-vpn-cluster (https://07C63820C58A0426296DAEFC73AED10C.gr7.us-east-1.eks.amazonaws.com) -Telepresence Root Daemon quitting... done -Telepresence User Daemon quitting... done -``` - -And show you the results of the test: - -``` ----------- Test Results: -❌ pod subnet 10.0.0.0/19 is masking VPN-routed CIDR 10.0.0.0/16. This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/19 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 10.0.0.0/16 are placed in the never-proxy list -✅ svc subnet 10.19.0.0/16 is clear of VPN - -Please see https://www.telepresence.io/docs/latest/reference/vpn for more info on these corrective actions, as well as examples - -Still having issues? Please create a new github issue at https://github.com/telepresenceio/telepresence/issues/new?template=Bug_report.md - Please make sure to add the following to your issue: - * Run `telepresence loglevel debug`, try to connect, then run `telepresence gather_logs`. It will produce a zipfile that you should attach to the issue. - * Which VPN client are you using? - * Which VPN server are you using? - * How is your VPN pushing DNS configuration? It may be useful to add the contents of /etc/resolv.conf -``` - -#### Interpreting test results - -##### Case 1: VPN masked by cluster - -In an instance where the VPN is masked by the cluster, the test-vpn tool informs you that a pod or service subnet is masking a CIDR that the VPN -routes: - -``` -❌ pod subnet 10.0.0.0/19 is masking VPN-routed CIDR 10.0.0.0/16. 
This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/19 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 10.0.0.0/16 are placed in the never-proxy list -``` - -This means that all VPN hosts within `10.0.0.0/19` will be rendered inaccessible while -telepresence is connected. - -The ideal resolution in this case is to move the pods to a different subnet. This is possible, -for example, in Amazon EKS by configuring a [new CIDR range](https://aws.amazon.com/premiumsupport/knowledge-center/eks-multiple-cidr-ranges/) for the pods. -In this case, configuring the pods to be located in `10.1.0.0/19` clears the VPN and allows you -to reach hosts inside the VPC's `10.0.0.0/19` - -However, it is not always possible to move the pods to a different subnet. -In these cases, you should use the [never-proxy](../config#neverproxy) configuration to prevent certain -hosts from being masked. -This might be particularly important for DNS resolution. In an AWS ClientVPN VPN it is often -customary to set the `.2` host as a DNS server (e.g. `10.0.0.2` in this case): - - - -If this is the case for your VPN, you should place the DNS server in the never-proxy list for your -cluster. In your kubeconfig file, add a `telepresence` extension like so: - -```yaml -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - never-proxy: - - 10.0.0.2/32 -``` - -##### Case 2: Cluster masked by VPN - -In an instance where the Cluster is masked by the VPN, the test-vpn tool informs you that a pod or service subnet is being masked by a CIDR -that the VPN routes: - -``` -❌ pod subnet 10.0.0.0/8 being masked by VPN-routed CIDR 10.0.0.0/16. This usually means that Telepresence will not be able to connect to your cluster. To resolve: - * Move pod subnet 10.0.0.0/8 to a subnet not mapped by the VPN - * If this is not possible, consider shrinking the mask of the 10.0.0.0/16 CIDR (e.g. from /16 to /8), or disabling split-tunneling -``` - -Typically this means that pods within `10.0.0.0/8` are not accessible while the VPN is -connected. - -As with the first case, the ideal resolution is to move the pods away, but this may not always -be possible. In that case, your best bet is to attempt to shrink the VPN's CIDR -(that is, make it route more hosts) to make Telepresence's routes win by virtue of specificity. -One easy way to do this may be by disabling split tunneling (see the [prerequisites](#prerequisites) -section for more on split-tunneling). - -Note that once you fix this, you may find yourself landing again in [Case 1](#case-1-vpn-masked-by-cluster), and may need -to use never-proxy rules to whitelist hosts in the VPN: - -``` -❌ pod subnet 10.0.0.0/8 is masking VPN-routed CIDR 0.0.0.0/1. This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/8 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 0.0.0.0/1 are placed in the never-proxy list -``` -
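After adding `never-proxy` entries, one way to confirm they took effect is to reconnect and inspect the client status; the 2.5.0 release notes further down in this diff mention that `telepresence status` displays the also-proxy and never-proxy subnets. The snippet below is a sketch: the output is trimmed and its exact formatting varies between versions.

```console
$ telepresence quit
$ telepresence connect
$ telepresence status
...
  Never Proxy : (1 subnets)
    - 10.0.0.2/32
...
```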
diff --git a/docs/pre-release/release-notes/no-ssh.png b/docs/pre-release/release-notes/no-ssh.png deleted file mode 100644 index 025f20ab..00000000 Binary files a/docs/pre-release/release-notes/no-ssh.png and /dev/null differ diff --git a/docs/pre-release/release-notes/run-tp-in-docker.png b/docs/pre-release/release-notes/run-tp-in-docker.png deleted file mode 100644 index 53b66a9b..00000000 Binary files a/docs/pre-release/release-notes/run-tp-in-docker.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.2.png b/docs/pre-release/release-notes/telepresence-2.2.png deleted file mode 100644 index 43abc7e8..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.2.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.0-homebrew.png b/docs/pre-release/release-notes/telepresence-2.3.0-homebrew.png deleted file mode 100644 index e203a975..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.0-homebrew.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.0-loglevels.png b/docs/pre-release/release-notes/telepresence-2.3.0-loglevels.png deleted file mode 100644 index 3d628c54..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.0-loglevels.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.1-alsoProxy.png b/docs/pre-release/release-notes/telepresence-2.3.1-alsoProxy.png deleted file mode 100644 index 4052b927..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.1-alsoProxy.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.1-brew.png b/docs/pre-release/release-notes/telepresence-2.3.1-brew.png deleted file mode 100644 index 2af42490..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.1-brew.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.1-dns.png b/docs/pre-release/release-notes/telepresence-2.3.1-dns.png deleted file mode 100644 index c6335e7a..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.1-dns.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.1-inject.png b/docs/pre-release/release-notes/telepresence-2.3.1-inject.png deleted file mode 100644 index aea1003e..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.1-inject.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.1-large-file-transfer.png b/docs/pre-release/release-notes/telepresence-2.3.1-large-file-transfer.png deleted file mode 100644 index 48ceb381..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.1-large-file-transfer.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.1-trafficmanagerconnect.png b/docs/pre-release/release-notes/telepresence-2.3.1-trafficmanagerconnect.png deleted file mode 100644 index 78128c17..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.1-trafficmanagerconnect.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.2-subnets.png b/docs/pre-release/release-notes/telepresence-2.3.2-subnets.png deleted file mode 100644 index 778c722a..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.2-subnets.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.2-svcport-annotation.png b/docs/pre-release/release-notes/telepresence-2.3.2-svcport-annotation.png 
deleted file mode 100644 index 1e1e9240..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.2-svcport-annotation.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.3-helm.png b/docs/pre-release/release-notes/telepresence-2.3.3-helm.png deleted file mode 100644 index 7b81480a..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.3-helm.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.3-namespace-config.png b/docs/pre-release/release-notes/telepresence-2.3.3-namespace-config.png deleted file mode 100644 index 7864d3a3..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.3-namespace-config.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.3-to-pod.png b/docs/pre-release/release-notes/telepresence-2.3.3-to-pod.png deleted file mode 100644 index aa7be3f6..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.3-to-pod.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.4-improved-error.png b/docs/pre-release/release-notes/telepresence-2.3.4-improved-error.png deleted file mode 100644 index fa8a1298..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.4-improved-error.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.4-ip-error.png b/docs/pre-release/release-notes/telepresence-2.3.4-ip-error.png deleted file mode 100644 index 1d37380c..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.4-ip-error.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.5-agent-config.png b/docs/pre-release/release-notes/telepresence-2.3.5-agent-config.png deleted file mode 100644 index 67d6d3e8..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.5-agent-config.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.5-grpc-max-receive-size.png b/docs/pre-release/release-notes/telepresence-2.3.5-grpc-max-receive-size.png deleted file mode 100644 index 32939f9d..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.5-grpc-max-receive-size.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.5-skipLogin.png b/docs/pre-release/release-notes/telepresence-2.3.5-skipLogin.png deleted file mode 100644 index bf79c191..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.5-skipLogin.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png b/docs/pre-release/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png deleted file mode 100644 index d29a05ad..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.7-keydesc.png b/docs/pre-release/release-notes/telepresence-2.3.7-keydesc.png deleted file mode 100644 index 9bffe5cc..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.7-keydesc.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.3.7-newkey.png b/docs/pre-release/release-notes/telepresence-2.3.7-newkey.png deleted file mode 100644 index c7d47c42..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.3.7-newkey.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.0-cloud-messages.png 
b/docs/pre-release/release-notes/telepresence-2.4.0-cloud-messages.png deleted file mode 100644 index ffd045ae..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.0-cloud-messages.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.0-windows.png b/docs/pre-release/release-notes/telepresence-2.4.0-windows.png deleted file mode 100644 index d27ba254..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.0-windows.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.1-systema-vars.png b/docs/pre-release/release-notes/telepresence-2.4.1-systema-vars.png deleted file mode 100644 index c098b439..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.1-systema-vars.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.4-gather-logs.png b/docs/pre-release/release-notes/telepresence-2.4.4-gather-logs.png deleted file mode 100644 index 7db54173..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.4-gather-logs.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.5-logs-anonymize.png b/docs/pre-release/release-notes/telepresence-2.4.5-logs-anonymize.png deleted file mode 100644 index edd01fde..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.5-logs-anonymize.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.5-pod-yaml.png b/docs/pre-release/release-notes/telepresence-2.4.5-pod-yaml.png deleted file mode 100644 index 3f565c4f..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.5-pod-yaml.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.5-preview-url-questions.png b/docs/pre-release/release-notes/telepresence-2.4.5-preview-url-questions.png deleted file mode 100644 index 1823aaa1..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.5-preview-url-questions.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.6-help-text.png b/docs/pre-release/release-notes/telepresence-2.4.6-help-text.png deleted file mode 100644 index aab9178a..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.6-help-text.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.8-health-check.png b/docs/pre-release/release-notes/telepresence-2.4.8-health-check.png deleted file mode 100644 index e10a0b47..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.8-health-check.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.4.8-vpn.png b/docs/pre-release/release-notes/telepresence-2.4.8-vpn.png deleted file mode 100644 index fbb21588..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.4.8-vpn.png and /dev/null differ diff --git a/docs/pre-release/release-notes/telepresence-2.5.0-pro-daemon.png b/docs/pre-release/release-notes/telepresence-2.5.0-pro-daemon.png deleted file mode 100644 index 5b82fc76..00000000 Binary files a/docs/pre-release/release-notes/telepresence-2.5.0-pro-daemon.png and /dev/null differ diff --git a/docs/pre-release/release-notes/tunnel.jpg b/docs/pre-release/release-notes/tunnel.jpg deleted file mode 100644 index 59a0397e..00000000 Binary files a/docs/pre-release/release-notes/tunnel.jpg and /dev/null differ diff --git a/docs/pre-release/releaseNotes.yml b/docs/pre-release/releaseNotes.yml deleted file mode 100644 index 
373be545..00000000 --- a/docs/pre-release/releaseNotes.yml +++ /dev/null @@ -1,1269 +0,0 @@ -# This file should be placed in the folder for the version of the -# product that's meant to be documented. A `/release-notes` page will -# be automatically generated and populated at build time. -# -# Note that an entry needs to be added to the `doc-links.yml` file in -# order to surface the release notes in the table of contents. -# -# The YAML in this file should contain: -# -# changelog: An (optional) URL to the CHANGELOG for the product. -# items: An array of releases with the following attributes: -# - version: The (optional) version number of the release, if applicable. -# - date: The date of the release in the format YYYY-MM-DD. -# - notes: An array of noteworthy changes included in the release, each having the following attributes: -# - type: The type of change, one of `bugfix`, `feature`, `security` or `change`. -# - title: A short title of the noteworthy change. -# - body: >- -# Two or three sentences describing the change and why it -# is noteworthy. This is HTML, not plain text or -# markdown. It is handy to use YAML's ">-" feature to -# allow line-wrapping. -# - image: >- -# The URL of an image that visually represents the -# noteworthy change. This path is relative to the -# `release-notes` directory; if this file is -# `FOO/releaseNotes.yml`, then the image paths are -# relative to `FOO/release-notes/`. -# - docs: The path to the documentation page where additional information can be found. -# - href: A path from the root to a resource on the getambassador website, takes precedence over a docs link. - -docTitle: Telepresence Release Notes -docDescription: >- - Release notes for Telepresence by Ambassador Labs, a CNCF project - that enables developers to iterate rapidly on Kubernetes - microservices by arming them with infinite-scale development - environments, access to instantaneous feedback loops, and highly - customizable development environments. - -changelog: https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md - -items: - - version: 2.5.8 - date: "2022-04-27" - notes: - - type: bugfix - title: Folder creation on `telepresence login` - body: >- - Fixed a bug where the telepresence config folder would not be created if the user ran `telepresence login` before other commands. - - version: 2.5.7 - date: "2022-04-25" - notes: - - type: change - title: RBAC requirements - body: >- - A namespaced traffic-manager will no longer require cluster wide RBAC. Only Roles and RoleBindings are now used. - - type: bugfix - title: Windows DNS - body: >- - The DNS recursion detector didn't work correctly on Windows, resulting in sporadic failures to resolve names that were resolved correctly at other times. - - type: bugfix - title: Session TTL and Reconnect - body: >- - A telepresence session will now last for 24 hours after the user's last connectivity. If a session expires, the connector will automatically try to reconnect. - - version: 2.5.6 - date: "2022-04-18" - notes: - - type: change - title: Less Watchers - body: >- - Telepresence agents watcher will now only watch namespaces that the user has accessed since the last `connect`. - - type: bugfix - title: More Efficient `gather-logs` - body: >- - The `gather-logs` command will no longer send any logs through `gRPC`. 
- - version: 2.5.5 - date: "2022-04-08" - notes: - - type: change - title: Traffic Manager Permissions - body: >- - The traffic-manager now requires permissions to read pods across namespaces even if installed with limited permissions - - type: bugfix - title: Linux DNS Cache - body: >- - The DNS resolver used on Linux with systemd-resolved now flushes the cache when the search path changes. - - type: bugfix - title: Automatic Connect Sync - body: >- - The `telepresence list` command will produce a correct listing even when not preceded by a `telepresence connect`. - - type: bugfix - title: Disconnect Reconnect Stability - body: >- - The root daemon will no longer get into a bad state when a disconnect is rapidly followed by a new connect. - - type: bugfix - title: Limit Watched Namespaces - body: >- - The client will now only watch agents from accessible namespaces, and is also constrained to namespaces explicitly mapped using the `connect` command's `--mapped-namespaces` flag. - - type: bugfix - title: Limit Namespaces used in `gather-logs` - body: >- - The `gather-logs` command will only gather traffic-agent logs from accessible namespaces, and is also constrained to namespaces explicitly mapped using the `connect` command's `--mapped-namespaces` flag. - - version: 2.5.4 - date: "2022-03-29" - notes: - - type: bugfix - title: Linux DNS Concurrency - body: >- - The DNS fallback resolver on Linux now correctly handles concurrent requests without timing them out - - type: bugfix - title: Non-Functional Flag - body: >- - The ingress-l5 flag will no longer be forcefully set to equal the --ingress-host flag - - type: bugfix - title: Automatically Remove Failed Intercepts - body: >- - Intercepts that fail to create are now consistently removed to prevent non-working dangling intercepts from sticking around. - - type: bugfix - title: Agent UID - body: >- - Agent container is no longer sensitive to a random UID or an UID imposed by a SecurityContext. - - type: bugfix - title: Gather-Logs Output Filepath - body: >- - Removed a bad concatenation that corrupted the output path of `telepresence gather-logs`. - - type: change - title: Remove Unnecessary Error Advice - body: >- - An advice to "see logs for details" is no longer printed when the argument count is incorrect in a CLI command. - - type: bugfix - title: Garbage Collection - body: >- - Client and agent sessions no longer leaves dangling waiters in the traffic-manager when they depart. 
- - type: bugfix - title: Limit Gathered Logs - body: >- - The client's gather logs command and agent watcher will now respect the configured grpc.maxReceiveSize - - type: change - title: In-Cluster Checks - body: >- - The TUN device will no longer route pod or service subnets if it is running in a machine that's already connected to the cluster - - type: change - title: Expanded Status Command - body: >- - The status command includes the install id, user id, account id, and user email in its result, and can print output as JSON - - type: change - title: List Command Shows All Intercepts - body: >- - The list command, when used with the `--intercepts` flag, will list the users intercepts from all namespaces - - version: 2.5.3 - date: "2022-02-25" - notes: - - type: bugfix - title: TCP Connectivity - body: >- - Fixed bug in the TCP stack causing timeouts after repeated connects to the same address - - type: feature - title: Linux Binaries - body: >- - Client-side binaries for the arm64 architecture are now available for linux - - version: 2.5.2 - date: "2022-02-23" - notes: - - type: bugfix - title: DNS server bugfix - body: >- - Fixed a bug where Telepresence would use the last server in resolv.conf - - version: 2.5.1 - date: "2022-02-19" - notes: - - type: bugfix - title: Fix GKE auth issue - body: >- - Fixed a bug where using a GKE cluster would error with: No Auth Provider found for name "gcp" - - version: 2.5.0 - date: "2022-02-18" - notes: - - type: feature - title: Intercept specific endpoints - body: >- - The flags --http-path-equal, --http-path-prefix, and --http-path-regex can can be used in - addition to the --http-match flag to filter personal intercepts by the request URL path - docs: concepts/intercepts#intercepting-a-specific-endpoint - - type: feature - title: Intercept metadata - body: >- - The flag --http-meta can be used to declare metadata key value pairs that will be returned by the Telepresence rest - API endpoint /intercept-info - docs: reference/restapi#intercept-info - - type: change - title: Client RBAC watch - body: >- - The verb "watch" was added to the set of required verbs when accessing services and workloads for the client RBAC - ClusterRole - docs: reference/rbac - - type: change - title: Dropped backward compatibility with versions <=2.4.4 - body: >- - Telepresence is no longer backward compatible with versions 2.4.4 or older because the deprecated multiplexing tunnel - functionality was removed. - - type: change - title: No global networking flags - body: >- - The global networking flags are no longer used and using them will render a deprecation warning unless they are supported by the - command. The subcommands that support networking flags are connect, current-cluster-id, - and genyaml. - - type: bugfix - title: Output of status command - body: >- - The also-proxy and never-proxy subnets are now displayed correctly when using the - telepresence status command. - - type: bugfix - title: SETENV sudo privilege no longer needed - body: >- - Telepresence longer requires SETENV privileges when starting the root daemon. - - type: bugfix - title: Network device names containing dash - body: >- - Telepresence will now parse device names containing dashes correctly when determining routes that it should never block. - - type: bugfix - title: Linux uses cluster.local as domain instead of search - body: >- - The cluster domain (typically "cluster.local") is no longer added to the DNS search on Linux using - systemd-resolved. 
Instead, it is added as a domain so that names ending with it are routed - to the DNS server. - - version: 2.4.11 - date: "2022-02-10" - notes: - - type: change - title: Add additional logging to troubleshoot intermittent issues with intercepts - body: >- - We've noticed some issues with intercepts in v2.4.10, so we are releasing a version - with enhanced logging to help debug and fix the issue. - - version: 2.4.10 - date: "2022-01-13" - notes: - - type: feature - title: Application Protocol Strategy - body: >- - The strategy used when selecting the application protocol for personal intercepts can now be configured using - the intercept.appProtocolStrategy in the config.yml file. - docs: reference/config/#intercept - image: telepresence-2.4.10-intercept-config.png - - type: feature - title: Helm value for the Application Protocol Strategy - body: >- - The strategy when selecting the application protocol for personal intercepts in agents injected by the - mutating webhook can now be configured using the agentInjector.appProtocolStrategy in the Helm chart. - docs: install/helm - - type: feature - title: New --http-plaintext option - body: >- - The flag --http-plaintext can be used to ensure that an intercept uses plaintext http or grpc when - communicating with the workstation process. - docs: reference/intercepts/#tls - - type: feature - title: Configure the default intercept port - body: >- - The port used by default in the telepresence intercept command (8080), can now be changed by setting - the intercept.defaultPort in the config.yml file. - docs: reference/config/#intercept - - type: change - title: Telepresence CI now uses Github Actions - body: >- - Telepresence now uses Github Actions for doing unit and integration testing. It is - now easier for contributors to run tests on PRs since maintainers can add an - "ok to test" label to PRs (including from forks) to run integration tests. - docs: https://github.com/telepresenceio/telepresence/actions - image: telepresence-2.4.10-actions.png - - type: bugfix - title: Check conditions before asking questions - body: >- - User will not be asked to log in or add ingress information when creating an intercept until a check has been - made that the intercept is possible. - docs: reference/intercepts/ - - type: bugfix - title: Fix invalid log statement - body: >- - Telepresence will no longer log invalid: "unhandled connection control message: code DIAL_OK" errors. - - type: bugfix - title: Log errors from sshfs/sftp - body: >- - Output to stderr from the traffic-agent's sftp and the client's sshfs processes - are properly logged as errors. - - type: bugfix - title: Don't use Windows path separators in workload pod template - body: >- - Auto installer will no longer not emit backslash separators for the /tel-app-mounts paths in the - traffic-agent container spec when running on Windows. - - version: 2.4.9 - date: "2021-12-09" - notes: - - type: bugfix - title: Helm upgrade nil pointer error - body: >- - A helm upgrade using the --reuse-values flag no longer fails on a "nil pointer" error caused by a nil - telpresenceAPI value. - docs: install/helm#upgrading-the-traffic-manager - - version: 2.4.8 - date: "2021-12-03" - notes: - - type: feature - title: VPN diagnostics tool - body: >- - There is a new subcommand, test-vpn, that can be used to diagnose connectivity issues with a VPN. - See the VPN docs for more information on how to use it. 
- docs: reference/vpn - image: telepresence-2.4.8-vpn.png - - - type: feature - title: RESTful API service - body: >- - A RESTful service was added to Telepresence, both locally to the client and to the traffic-agent to - help determine if messages with a set of headers should be consumed or not from a message queue where the - intercept headers are added to the messages. - docs: reference/restapi - image: telepresence-2.4.8-health-check.png - - - type: change - title: TELEPRESENCE_LOGIN_CLIENT_ID env variable no longer used - body: >- - You could previously configure this value, but there was no reason to change it, so the value - was removed. - - - type: bugfix - title: Tunneled network connections behave more like ordinary TCP connections. - body: >- - When using Telepresence with an external cloud provider for extensions, those tunneled - connections now behave more like TCP connections, especially when it comes to timeouts. - We've also added increased testing around these types of connections. - - version: 2.4.7 - date: "2021-11-24" - notes: - - type: feature - title: Injector service-name annotation - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-service-name, that can be used to set the name of the service to be intercepted. - This will help disambiguate which service to intercept for when a workload is exposed by multiple services, such as can happen with Argo Rollouts - docs: reference/cluster-config#service-name-annotation - - type: feature - title: Skip the Ingress Dialogue - body: >- - You can now skip the ingress dialogue by setting the ingress parameters in the corresponding flags. - docs: reference/intercepts#skipping-the-ingress-dialogue - - type: feature - title: Never proxy subnets - body: >- - The kubeconfig extensions now support a never-proxy argument, - analogous to also-proxy, that defines a set of subnets that - will never be proxied via telepresence. - docs: reference/config#neverproxy - - type: change - title: Daemon versions check - body: >- - Telepresence now checks the versions of the client and the daemons and asks the user to quit and restart if they don't match. - - type: change - title: No explicit DNS flushes - body: >- - Telepresence DNS now uses a very short TTL instead of explicitly flushing DNS by killing the mDNSResponder or doing resolvectl flush-caches - docs: reference/routing#dns-caching - - type: bugfix - title: Legacy flags now work with global flags - body: >- - Legacy flags such as `--swap-deployment` can now be used together with global flags. - - type: bugfix - title: Outbound connection closing - body: >- - Outbound connections are now properly closed when the peer closes. - - type: bugfix - title: Prevent DNS recursion - body: >- - The DNS-resolver will trap recursive resolution attempts (may happen when the cluster runs in a docker-container on the client). - docs: reference/routing#dns-recursion - - type: bugfix - title: Prevent network recursion - body: >- - The TUN-device will trap failed connection attempts that results in recursive calls back into the TUN-device (may happen when the - cluster runs in a docker-container on the client). - docs: reference/routing#connect-recursion - - type: bugfix - title: Traffic Manager deadlock fix - body: >- - The Traffic Manager no longer runs a risk of entering a deadlock when a new Traffic agent arrives. 
- - type: bugfix - title: webhookRegistry config propagation - body: >- - The configured webhookRegistry is now propagated to the webhook installer even if no webhookAgentImage has been set. - docs: reference/config#images - - type: bugfix - title: Login refreshes expired tokens - body: >- - When a user's token has expired, telepresence login - will prompt the user to log in again to get a new token. Previously, - the user had to telepresence quit and telepresence logout - to get a new token. - docs: https://github.com/telepresenceio/telepresence/issues/2062 - - version: 2.4.6 - date: "2021-11-02" - notes: - - type: feature - title: Manually injecting Traffic Agent - body: >- - Telepresence now supports manually injecting the traffic-agent YAML into workload manifests. - Use the genyaml command to create the sidecar YAML, then add the telepresence.getambassador.io/manually-injected: "true" annotation to your pods to allow Telepresence to intercept them. - docs: reference/intercepts/manual-agent - - - type: feature - title: Telepresence CLI released for Apple silicon - body: >- - Telepresence is now built and released for Apple silicon. - docs: install/?os=macos - - - type: change - title: Telepresence help text now links to telepresence.io - body: >- - We now include a link to our documentation when you run telepresence --help. This will make it easier - for users to find this page whether they acquire Telepresence through Brew or some other mechanism. - image: telepresence-2.4.6-help-text.png - - - type: bugfix - title: Fixed bug when API server is inside CIDR range of pods/services - body: >- - If the API server for your kubernetes cluster had an IP that fell within the - subnet generated from pods/services in a kubernetes cluster, it would proxy traffic - to the API server which would result in hanging or a failed connection. We now ensure - that the API server is explicitly not proxied. - - version: 2.4.5 - date: "2021-10-15" - notes: - - type: feature - title: Get pod yaml with gather-logs command - body: >- - Adding the flag --get-pod-yaml to your request will get the - pod yaml manifest for all kubernetes components you are getting logs for - ( traffic-manager and/or pods containing a - traffic-agent container). This flag is set to false - by default. - docs: reference/client - image: telepresence-2.4.5-pod-yaml.png - - - type: feature - title: Anonymize pod name + namespace when using gather-logs command - body: >- - Adding the flag --anonymize to your command will - anonymize your pod names + namespaces in the output file. We replace the - sensitive names with simple names (e.g. pod-1, namespace-2) to maintain - relationships between the objects without exposing the real names of your - objects. This flag is set to false by default. - docs: reference/client - image: telepresence-2.4.5-logs-anonymize.png - - - type: feature - title: Added context and defaults to ingress questions when creating a preview URL - body: >- - Previously, we referred to OSI model layers when asking these questions, but this - terminology is not commonly used. The questions now provide a clearer context for the user, along with a default answer as an example. - docs: howtos/preview-urls - image: telepresence-2.4.5-preview-url-questions.png - - - type: feature - title: Support for intercepting headless services - body: >- - Intercepting headless services is now officially supported. You can request a - headless service on whatever port it exposes and get a response from the - intercept. 
This leverages the same approach as intercepting numeric ports when - using the mutating webhook injector, mainly requires the initContainer - to have NET_ADMIN capabilities. - docs: reference/intercepts/#intercepting-headless-services - - - type: change - title: Use one tunnel per connection instead of multiplexing into one tunnel - body: >- - We have changed Telepresence so that it uses one tunnel per connection instead - of multiplexing all connections into one tunnel. This will provide substantial - performance improvements. Clients will still be backwards compatible with older - managers that only support multiplexing. - - - type: bugfix - title: Added checks for Telepresence kubernetes compatibility - body: >- - Telepresence currently works with Kubernetes server versions 1.17.0 - and higher. We have added logs in the connector and traffic-manager - to let users know when they are using Telepresence with a cluster it doesn't support. - docs: reference/cluster-config - - - type: bugfix - title: Traffic Agent security context is now only added when necessary - body: >- - When creating an intercept, Telepresence will now only set the traffic agent's GID - when strictly necessary (i.e. when using headless services or numeric ports). This mitigates - an issue on openshift clusters where the traffic agent can fail to be created due to - openshift's security policies banning arbitrary GIDs. - - - version: 2.4.4 - date: "2021-09-27" - notes: - - type: feature - title: Numeric ports in agent injector - body: >- - The agent injector now supports injecting Traffic Agents into pods that have unnamed ports. - docs: reference/cluster-config/#note-on-numeric-ports - - - type: feature - title: New subcommand to gather logs and export into zip file - body: >- - Telepresence has logs for various components (the - traffic-manager, traffic-agents, the root and - user daemons), which are integral for understanding and debugging - Telepresence behavior. We have added the telepresence - gather-logs command to make it simple to compile logs for - all Telepresence components and export them in a zip file that can - be shared to others and/or included in a github issue. For more - information on usage, run telepresence gather-logs --help - . - docs: reference/client - image: telepresence-2.4.4-gather-logs.png - - - type: feature - title: Pod CIDR strategy is configurable in Helm chart - body: >- - Telepresence now enables you to directly configure how to get - pod CIDRs when deploying Telepresence with the Helm chart. - The default behavior remains the same. We've also introduced - the ability to explicitly set what the pod CIDRs should be. - docs: install/helm - - - type: bugfix - title: Compute pod CIDRs more efficiently - body: >- - When computing subnets using the pod CIDRs, the traffic-manager - now uses less CPU cycles. - docs: reference/routing/#subnets - - - type: bugfix - title: Prevent busy loop in traffic-manager - body: >- - In some circumstances, the traffic-manager's CPU - would max out and get pinned at its limit. This required a - shutdown or pod restart to fix. We've added some fixes - to prevent the traffic-manager from getting into this state. - - - type: bugfix - title: Added a fixed buffer size to TUN-device - body: >- - The TUN-device now has a max buffer size of 64K. This prevents the - buffer from growing limitlessly until it receies a PSH, which could - be a blocking operation when receiving lots of TCP-packets. 
- docs: reference/tun-device - - - type: bugfix - title: Fix hanging user daemon - body: >- - When Telepresence encountered an issue connecting to the cluster or - the root daemon, it could hang indefinitely. It will now error correctly - when it encounters that situation. - - - type: bugfix - title: Improved proprietary agent connectivity - body: >- - To determine whether the cluster environment is air-gapped, the - proprietary agent attempts to connect to the cloud during startup. - To deal with a possible initial failure, the agent backs off - and retries the connection with an increasing backoff duration. - - - type: bugfix - title: Telepresence correctly reports intercept port conflict - body: >- - When creating a second intercept targeting the same local port, - it now gives the user an informative error message. Additionally, - it tells them which intercept is currently using that port to make - it easier to remedy. - - - version: 2.4.3 - date: "2021-09-15" - notes: - - type: feature - title: Environment variable TELEPRESENCE_INTERCEPT_ID available in interceptor's environment - body: >- - When you perform an intercept, we now include a TELEPRESENCE_INTERCEPT_ID environment - variable in the environment. - docs: reference/environment/#telepresence-environment-variables - - - type: bugfix - title: Improved daemon stability - body: >- - Fixed a timing bug that sometimes caused a "daemon did not start" failure. - - - type: bugfix - title: Complete logs for Windows - body: >- - Crash stack traces and other errors were incorrectly not written to log files. This has - been fixed so logs for Windows should be at parity with the ones on macOS and Linux. - - - type: bugfix - title: Log rotation fix for Linux kernel 4.11+ - body: >- - On Linux kernel 4.11 and above, the log file rotation now properly reads the - birth-time of the log file. Older kernels continue to use the old behavior - of using the change-time in place of the birth-time. - - - type: bugfix - title: Improved error messaging - body: >- - When Telepresence encounters an error, it tells the user where they should look for - logs related to the error. We have refined this so that it only tells users to look - for errors in the daemon logs for issues that are logged there. - - - type: bugfix - title: Stop resolving localhost - body: >- - When using the overriding DNS resolver, it will no longer apply search paths when - resolving localhost, since that should be resolved on the user's machine - instead of the cluster. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Variable cluster domain - body: >- - Previously, the cluster domain was hardcoded to cluster.local. While this - is true for many kubernetes clusters, it is not for all of them. Now this value is - retrieved from the traffic-manager. - - - type: bugfix - title: Improved cleanup of traffic-agents - body: >- - Telepresence now uninstalls traffic-agents installed via mutating webhook - when using telepresence uninstall --everything. - - - type: bugfix - title: More large file transfer fixes - body: >- - Downloading large files during an intercept will no longer cause timeouts and hanging - traffic-agents. - - - type: bugfix - title: Setting --mount to false when intercepting works as expected - body: >- - When using --mount=false while performing an intercept, the file system - was still mounted. This has been remedied so the intercept behavior respects the - flag.
- docs: reference/volume - - - type: bugfix - title: Traffic-manager establishes outbound connections in parallel - body: >- - Previously, the traffic-manager established outbound connections - sequentially. This resulted in slow (and failing) Dial calls blocking - all outbound traffic from the workstation (for up to 30 seconds). We now - establish these connections in parallel so that this no longer occurs. - docs: reference/routing/#outbound - - - type: bugfix - title: Status command reports correct DNS settings - body: >- - Telepresence status now correctly reports DNS settings for all operating - systems, instead of Local IP:nil, Remote IP:nil when they don't exist. - - - version: 2.4.2 - date: "2021-09-01" - notes: - - type: feature - title: New subcommand to temporarily change log-level - body: >- - We have added a new telepresence loglevel subcommand that enables users - to temporarily change the log-level for the local daemons, the traffic-manager and - the traffic-agents. While the logLevels settings from the config will - still be used by default, this can be helpful if you are currently experiencing an issue and - want to have higher fidelity logs, without doing a telepresence quit and - telepresence connect. You can use telepresence loglevel --help to get - more information on options for the command. - docs: reference/config - - - type: change - title: All components have info as the default log-level - body: >- - We've now set all components of Telepresence (traffic-agent, - traffic-manager, local daemons) to use info as the default log-level. - - - type: bugfix - title: Updating RBAC in helm chart to fix cluster-id regression - body: >- - In 2.4.1, we enabled the traffic-manager to get the cluster ID by getting the UID - of the default namespace. The helm chart was not updated to give the traffic-manager - those permissions, which has since been fixed. This impacted users who use licensed features of - the Telepresence extension in an air-gapped environment. - docs: reference/cluster-config/#air-gapped-cluster - - - type: bugfix - title: Timeouts for Helm actions are now respected - body: >- - The user-defined timeout for Helm actions wasn't always respected, causing the daemon to hang - indefinitely when failing to install the traffic-manager. - docs: reference/config#timeouts - - - version: 2.4.1 - date: "2021-08-30" - notes: - - type: feature - title: External cloud variables are now configurable - body: >- - We now support configuring the host and port for the cloud in your config.yml. These - are used when logging in to utilize features provided by an extension, and are also passed - along as environment variables when installing the `traffic-manager`. Additionally, we - now run our testsuite with these variables set to localhost to continue to ensure Telepresence - is fully functional without depending on an external service. The SYSTEMA_HOST and SYSTEMA_PORT - environment variables are no longer used. - image: telepresence-2.4.1-systema-vars.png - docs: reference/config/#cloud - - - type: feature - title: Helm chart can now regenerate certificate used for mutating webhook on-demand. - body: >- - You can now set agentInjector.certificate.regenerate when deploying Telepresence - with the Helm chart to automatically regenerate the certificate used by the agent injector webhook.
- docs: install/helm - - - type: change - title: Traffic Manager installed via helm - body: >- - The traffic-manager is now installed via an embedded version of the Helm chart when telepresence connect is first performed on a cluster. - This change is transparent to the user. - A new configuration flag, timeouts.helm, sets the timeouts for all helm operations performed by the Telepresence binary. - docs: reference/config#timeouts - - - type: change - title: traffic-manager gets cluster ID itself instead of via environment variable - body: >- - The traffic-manager used to get the cluster ID as an environment variable when running - telepresence connect or via adding the value in the helm chart. This was - clunky, so now the traffic-manager gets the value itself as long as it has permissions - to "get" and "list" namespaces (this has been updated in the helm chart). - docs: install/helm - - - type: bugfix - title: Telepresence now mounts all directories from /var/run/secrets - body: >- - In the past, we only mounted secret directories in /var/run/secrets/kubernetes.io. - We now mount *all* directories in /var/run/secrets, which, for example, includes - directories like eks.amazonaws.com used for IRSA tokens. - docs: reference/volume - - - type: bugfix - title: Max gRPC receive size correctly propagates to all grpc servers - body: >- - This fixes a bug where the max gRPC receive size was only propagated to some of the - grpc servers, causing failures when the message size was over the default. - docs: reference/config/#grpc - - - type: bugfix - title: Updated our Homebrew packaging to run manually - body: >- - We made some updates to our script that packages Telepresence for Homebrew so that it - can be run manually. This will enable maintainers of Telepresence to run the script manually - should we ever need to roll back a release and have latest point to an older version. - docs: install/ - - - type: bugfix - title: Telepresence uses namespace from kubeconfig context on each call - body: >- - In the past, Telepresence would use whatever namespace was specified in the kubeconfig's current-context - for the entirety of the time a user was connected to Telepresence. This would lead to confusing behavior - when a user changed the context in their kubeconfig and expected Telepresence to acknowledge that change. - Telepresence will now do that and use the namespace designated by the context on each call. - - - type: bugfix - title: Idle outbound TCP connections timeout increased to 7200 seconds - body: >- - Some users were noticing that their intercepts would start failing after 60 seconds. - This was because the keepalive time for idle outbound TCP connections was set to 60 seconds, which we have - now bumped to 7200 seconds to match Linux's tcp_keepalive_time default. - - - type: bugfix - title: Telepresence will automatically remove a socket upon ungraceful termination - body: >- - When a Telepresence process terminated ungracefully, it would inform users that "this usually means - that the process has terminated ungracefully" and implied that they should remove the socket. We've - now made it so Telepresence will automatically attempt to remove the socket upon ungraceful termination. - - - type: bugfix - title: Fixed user daemon deadlock - body: >- - Remedied a situation where the user daemon could hang when a user was logged in.
- - - type: bugfix - title: Fixed agentImage config setting - body: >- - The config setting images.agentImages is no longer required to contain the repository, and it - will use the value at images.repository. - docs: reference/config/#images - - - version: 2.4.0 - date: "2021-08-04" - notes: - - type: feature - title: Windows Client Developer Preview - body: >- - There is now a native Windows client for Telepresence that is being released as a Developer Preview. - All the same features supported by the MacOS and Linux client are available on Windows. - image: telepresence-2.4.0-windows.png - docs: install - - - type: feature - title: CLI raises helpful messages from Ambassador Cloud - body: >- - Telepresence can now receive messages from Ambassador Cloud and raise - them to the user when they perform certain commands. This enables us - to send you messages that may enhance your Telepresence experience when - using certain commands. Frequency of messages can be configured in your - config.yml. - image: telepresence-2.4.0-cloud-messages.png - docs: reference/config#cloud - - - type: bugfix - title: Improved stability of systemd-resolved-based DNS - body: >- - When initializing the systemd-resolved-based DNS, the routing domain - is set to improve stability in non-standard configurations. This also enables the - overriding resolver to do a proper take over once the DNS service ends. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Fixed an edge case when intercepting a container with multiple ports - body: >- - When specifying a port of a container to intercept, if there was a container in the - pod without ports, it was automatically selected. This has been fixed so we'll only - choose the container with "no ports" if there's no container that explicitly matches - the port used in your intercept. - docs: reference/intercepts/#creating-an-intercept-when-a-service-has-multiple-ports - - - type: bugfix - title: $(NAME) references in agent's environments are now interpolated correctly. - body: >- - If you had an environment variable $(NAME) in your workload that referenced another, intercepts - would not correctly interpolate $(NAME). This has been fixed and works automatically. - - - type: bugfix - title: Telepresence no longer prints INFO message when there is no config.yml - body: >- - Fixed a regression that printed an INFO message to the terminal when there wasn't a - config.yml present. The config is optional, so this message has been - removed. - docs: reference/config - - - type: bugfix - title: Telepresence no longer panics when using --http-match - body: >- - Fixed a bug where Telepresence would panic if the value passed to --http-match - didn't contain an equal sign, which has been fixed. The correct syntax is in the --help - string and looks like --http-match=HTTP2_HEADER=REGEX - docs: reference/intercepts/#intercept-behavior-when-logged-in-to-ambassador-cloud - - - type: bugfix - title: Improved subnet updates - body: >- - The `traffic-manager` used to update subnets whenever the `Nodes` or `Pods` changed, even if - the underlying subnet hadn't changed, which created a lot of unnecessary traffic between the - client and the `traffic-manager`. This has been fixed so we only send updates when the subnets - themselves actually change. 
- docs: reference/routing/#subnets - - - version: 2.3.7 - date: "2021-07-23" - notes: - - type: feature - title: Also-proxy in telepresence status - body: >- - An also-proxy entry in the Kubernetes cluster config will - show up in the output of the telepresence status command. - docs: reference/config - - - type: feature - title: Non-interactive telepresence login - body: >- - telepresence login now has an - --apikey=KEY flag that allows for - non-interactive logins. This is useful for headless - environments where launching a web-browser is impossible, - such as cloud shells, Docker containers, or CI. - image: telepresence-2.3.7-newkey.png - docs: reference/client/login/ - - - type: bugfix - title: Mutating webhook injector correctly hides named ports for probes. - body: >- - The mutating webhook injector has been fixed to correctly rename named ports for liveness and readiness probes - docs: reference/cluster-config - - - type: bugfix - title: telepresence current-cluster-id crash fixed - body: >- - Fixed a regression introduced in 2.3.5 that caused `telepresence current-cluster-id` - to crash. - docs: reference/cluster-config - - - type: bugfix - title: Better UX around intercepts with no local process running - body: >- - Requests would hang indefinitely when initiating an intercept before you - had a local process running. This has been fixed and will result in an - Empty reply from server until you start a local process. - docs: reference/intercepts - - - type: bugfix - title: API keys no longer show as "no description" - body: >- - New API keys generated internally for communication with - Ambassador Cloud no longer show up as "no description" in - the Ambassador Cloud web UI. Existing API keys generated by - older versions of Telepresence will still show up this way. - image: telepresence-2.3.7-keydesc.png - - - type: bugfix - title: Fix corruption of user-info.json - body: >- - Fixed a race condition that logging in and logging out - rapidly could cause memory corruption or corruption of the - user-info.json cache file used when - authenticating with Ambassador Cloud. - - - type: bugfix - title: Improved DNS resolver for systemd-resolved - body: - Telepresence's systemd-resolved-based DNS resolver is now more - stable and in case it fails to initialize, the overriding resolver - will no longer cause general DNS lookup failures when telepresence defaults to - using it. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Faster telepresence list command - body: - The performance of telepresence list has been increased - significantly by reducing the number of calls the command makes to the cluster. - docs: reference/client - - - version: 2.3.6 - date: "2021-07-20" - notes: - - type: bugfix - title: Fix preview URLs - body: >- - Fixed a regression introduced in 2.3.5 that caused preview - URLs to not work. - - - type: bugfix - title: Fix subnet discovery - body: >- - Fixed a regression introduced in 2.3.5 where the Traffic - Manager's RoleBinding did not correctly appoint - the traffic-manager Role, causing - subnet discovery to not be able to work correctly. - docs: reference/rbac/ - - - type: bugfix - title: Fix root-user configuration loading - body: >- - Fixed a regression introduced in 2.3.5 where the root daemon - did not correctly read the configuration file; ignoring the - user's configured log levels and timeouts. 
- docs: reference/config/ - - - type: bugfix - title: Fix a user daemon crash - body: >- - Fixed an issue that could cause the user daemon to crash - during shutdown, as during shutdown it unconditionally - attempted to close a channel even though the channel might - already be closed. - - - version: 2.3.5 - date: "2021-07-15" - notes: - - type: feature - title: traffic-manager in multiple namespaces - body: >- - We now support installing multiple traffic managers in the same cluster. - This will allow operators to install deployments of telepresence that are - limited to certain namespaces. - image: ./telepresence-2.3.5-traffic-manager-namespaces.png - docs: install/helm - - type: feature - title: No more dependence on kubectl - body: >- - Telepresence no longer depends on having an external - kubectl binary, which might not be present for - OpenShift users (who have oc instead of - kubectl). - - type: feature - title: Agent image now configurable - body: >- - We now support configuring which agent image + registry to use in the - config. This enables users whose laptop is an air-gapped environment to - create personal intercepts without requiring a login. It also makes it easier - for those who are developing on Telepresence to specify which agent image should - be used. Env vars TELEPRESENCE_AGENT_IMAGE and TELEPRESENCE_REGISTRY are no longer - used. - image: ./telepresence-2.3.5-agent-config.png - docs: reference/config/#images - - type: feature - title: Max gRPC receive size now configurable - body: >- - The default max size of messages received through gRPC (4 MB) is sometimes insufficient. It can now be configured. - image: ./telepresence-2.3.5-grpc-max-receive-size.png - docs: reference/config/#grpc - - type: feature - title: CLI can be used in air-gapped environments - body: >- - While Telepresence will auto-detect if your cluster is in an air-gapped environment, - we've added an option users can add to their config.yml to ensure the cli acts like it - is in an air-gapped environment. Air-gapped environments require a manually installed - licence. - docs: reference/cluster-config/#air-gapped-cluster - image: ./telepresence-2.3.5-skipLogin.png - - version: 2.3.4 - date: "2021-07-09" - notes: - - type: bugfix - title: Improved IP log statements - body: >- - Some log statements were printing incorrect characters, when they should have been IP addresses. - This has been resolved to include more accurate and useful logging. - docs: reference/config/#log-levels - image: ./telepresence-2.3.4-ip-error.png - - type: bugfix - title: Improved messaging when multiple services match a workload - body: >- - If multiple services matched a workload when performing an intercept, Telepresence would crash. - It now gives the correct error message, instructing the user on how to specify which - service the intercept should use. - image: ./telepresence-2.3.4-improved-error.png - docs: reference/intercepts - - type: bugfix - title: Traffic-manger creates services in its own namespace to determine subnet - body: >- - Telepresence will now determine the service subnet by creating a dummy-service in its own - namespace, instead of the default namespace, which was causing RBAC permissions issues in - some clusters. - docs: reference/routing/#subnets - - type: bugfix - title: Telepresence connect respects pre-existing clusterrole - body: >- - When Telepresence connects, if the traffic-manager's desired clusterrole already exists in the - cluster, Telepresence will no longer try to update the clusterrole. 
- docs: reference/rbac - - type: bugfix - title: Helm Chart fixed for clientRbac.namespaced - body: >- - The Telepresence Helm chart no longer fails when installing with --set clientRbac.namespaced=true. - docs: install/helm - - version: 2.3.3 - date: "2021-07-07" - notes: - - type: feature - title: Traffic Manager Helm Chart - body: >- - Telepresence now supports installing the Traffic Manager via Helm. - This will make it easy for operators to install and configure the - server-side components of Telepresence separately from the CLI (which - in turn allows for better separation of permissions). - image: ./telepresence-2.3.3-helm.png - docs: install/helm/ - - type: feature - title: Traffic-manager in custom namespace - body: >- - As the traffic-manager can now be installed in any - namespace via Helm, Telepresence can now be configured to look for the - Traffic Manager in a namespace other than ambassador. - This can be configured on a per-cluster basis. - image: ./telepresence-2.3.3-namespace-config.png - docs: reference/config - - type: feature - title: Intercept --to-pod - body: >- - telepresence intercept now supports a - --to-pod flag that can be used to port-forward sidecars' - ports from an intercepted pod. - image: ./telepresence-2.3.3-to-pod.png - docs: reference/intercepts - - type: change - title: Change in migration from edgectl - body: >- - Telepresence no longer automatically shuts down the old - api_version=1 edgectl daemon. If migrating - from such an old version of edgectl you must now manually - shut down the edgectl daemon before running Telepresence. - This was already the case when migrating from the newer - api_version=2 edgectl. - - type: bugfix - title: Fixed error during shutdown - body: >- - The root daemon no longer terminates when the user daemon disconnects - from its gRPC streams, and instead waits to be terminated by the CLI. - This could cause problems with things not being cleaned up correctly. - - type: bugfix - title: Intercepts will survive deletion of intercepted pod - body: >- - An intercept will survive deletion of the intercepted pod provided - that another pod is created (or already exists) that can take over. - - version: 2.3.2 - date: "2021-06-18" - notes: - # Headliners - - type: feature - title: Service Port Annotation - body: >- - The mutator webhook for injecting traffic-agents now - recognizes a - telepresence.getambassador.io/inject-service-port - annotation to specify which port to intercept; bringing the - functionality of the --port flag to users who - use the mutator webook in order to control Telepresence via - GitOps. - image: ./telepresence-2.3.2-svcport-annotation.png - docs: reference/cluster-config#service-port-annotation - - type: feature - title: Outbound Connections - body: >- - Outbound connections are now routed through the intercepted - Pods which means that the connections originate from that - Pod from the cluster's perspective. This allows service - meshes to correctly identify the traffic. - docs: reference/routing/#outbound - - type: change - title: Inbound Connections - body: >- - Inbound connections from an intercepted agent are now - tunneled to the manager over the existing gRPC connection, - instead of establishing a new connection to the manager for - each inbound connection. This avoids interference from - certain service mesh configurations. 
- docs: reference/routing/#inbound - - # RBAC changes - - type: change - title: Traffic Manager needs new RBAC permissions - body: >- - The Traffic Manager requires RBAC - permissions to list Nodes, Pods, and to create a dummy - Service in the manager's namespace. - docs: reference/routing/#subnets - - type: change - title: Reduced developer RBAC requirements - body: >- - The on-laptop client no longer requires RBAC permissions to list the Nodes - in the cluster or to create Services, as that functionality - has been moved to the Traffic Manager. - - # Bugfixes - - type: bugfix - title: Able to detect subnets - body: >- - Telepresence will now detect the Pod CIDR ranges even if - they are not listed in the Nodes. - image: ./telepresence-2.3.2-subnets.png - docs: reference/routing/#subnets - - type: bugfix - title: Dynamic IP ranges - body: >- - The list of cluster subnets that the virtual network - interface will route is now configured dynamically and will - follow changes in the cluster. - - type: bugfix - title: No duplicate subnets - body: >- - Subnets fully covered by other subnets are now pruned - internally and thus never superfluously added to the - laptop's routing table. - docs: reference/routing/#subnets - - type: change # not a bugfix, but it only makes sense to mention after the above bugfixes - title: Change in default timeout - body: >- - The trafficManagerAPI timeout default has - changed from 5 seconds to 15 seconds, in order to facilitate - the extended time it takes for the traffic-manager to do its - initial discovery of cluster info as a result of the above - bugfixes. - - type: bugfix - title: Removal of DNS config files on macOS - body: >- - On macOS, files generated under - /etc/resolver/ as the result of using - include-suffixes in the cluster config are now - properly removed on quit. - docs: reference/routing/#macos-resolver - - - type: bugfix - title: Large file transfers - body: >- - Telepresence no longer erroneously terminates connections - early when sending a large HTTP response from an intercepted - service. - - type: bugfix - title: Race condition in shutdown - body: >- - When shutting down the user-daemon or root-daemon on the - laptop, telepresence quit and related commands - no longer return early before everything is fully shut down. - Now it can be counted on that by the time the command has - returned that all of the side-effects on the laptop have - been cleaned up. - - version: 2.3.1 - date: "2021-06-14" - notes: - - title: DNS Resolver Configuration - body: "Telepresence now supports per-cluster configuration for custom dns behavior, which will enable users to determine which local + remote resolver to use and which suffixes should be ignored + included. These can be configured on a per-cluster basis." - image: ./telepresence-2.3.1-dns.png - docs: reference/config - type: feature - - title: AlsoProxy Configuration - body: "Telepresence now supports also proxying user-specified subnets so that they can access external services only accessible to the cluster while connected to Telepresence. These can be configured on a per-cluster basis and each subnet is added to the TUN device so that requests are routed to the cluster for IPs that fall within that subnet." 
- image: ./telepresence-2.3.1-alsoProxy.png - docs: reference/config - type: feature - - title: Mutating Webhook for Injecting Traffic Agents - body: "The Traffic Manager now contains a mutating webhook to automatically add an agent to pods that have the telepresence.getambassador.io/traffic-agent: enabled annotation. This enables Telepresence to work well with GitOps CD platforms that rely on higher level kubernetes objects matching what is stored in git. For workloads without the annotation, Telepresence will add the agent the way it has in the past" - image: ./telepresence-2.3.1-inject.png - docs: reference/rbac - type: feature - - title: Traffic Manager Connect Timeout - body: "The trafficManagerConnect timeout default has changed from 20 seconds to 60 seconds, in order to facilitate the extended time it takes to apply everything needed for the mutator webhook." - image: ./telepresence-2.3.1-trafficmanagerconnect.png - docs: reference/config - type: change - - title: Fix for large file transfers - body: "Fix a tun-device bug where sometimes large transfers from services on the cluster would hang indefinitely" - image: ./telepresence-2.3.1-large-file-transfer.png - docs: reference/tun-device - type: bugfix - - title: Brew Formula Changed - body: "Now that the Telepresence rewrite is the main version of Telepresence, you can install it via Brew like so: brew install datawire/blackbird/telepresence." - image: ./telepresence-2.3.1-brew.png - docs: install/ - type: change - - version: 2.3.0 - date: "2021-06-01" - notes: - - title: Brew install Telepresence - body: "Telepresence can now be installed via brew on macOS, which makes it easier for users to stay up-to-date with the latest telepresence version. To install via brew, you can use the following command: brew install datawire/blackbird/telepresence2." - image: ./telepresence-2.3.0-homebrew.png - docs: install/ - type: feature - - title: TCP and UDP routing via Virtual Network Interface - body: "Telepresence will now perform routing of outbound TCP and UDP traffic via a Virtual Network Interface (VIF). The VIF is a layer 3 TUN-device that exists while Telepresence is connected. It makes the subnets in the cluster available to the workstation and will also route DNS requests to the cluster and forward them to intercepted pods. This means that pods with custom DNS configuration will work as expected. Prior versions of Telepresence would use firewall rules and were only capable of routing TCP." - image: ./tunnel.jpg - docs: reference/tun-device - type: feature - - title: SSH is no longer used - body: "All traffic between the client and the cluster is now tunneled via the traffic manager gRPC API. This means that Telepresence no longer uses ssh tunnels and that the manager no longer have an sshd installed. Volume mounts are still established using sshfs but it is now configured to communicate using the sftp-protocol directly, which means that the traffic agent also runs without sshd. A desired side effect of this is that the manager and agent containers no longer need a special user configuration." - image: ./no-ssh.png - docs: reference/tun-device/#no-ssh-required - type: change - - title: Running in a Docker container - body: "Telepresence can now be run inside a Docker container. This can be useful for avoiding side effects on a workstation's network, establishing multiple sessions with the traffic manager, or working with different clusters simultaneously." 
- image: ./run-tp-in-docker.png - docs: reference/inside-container - type: feature - - title: Configurable Log Levels - body: "Telepresence now supports configuring the log level for Root Daemon and User Daemon logs. This provides control over the nature and volume of information that Telepresence generates in daemon.log and connector.log." - image: ./telepresence-2.3.0-loglevels.png - docs: reference/config/#log-levels - type: feature - - version: 2.2.2 - date: "2021-05-17" - notes: - - title: Legacy Telepresence subcommands - body: Telepresence is now able to translate common legacy Telepresence commands into native Telepresence commands. So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the new Telepresence binary. - image: ./telepresence-2.2.png - docs: install/migrate-from-legacy/ - type: feature diff --git a/docs/pre-release/troubleshooting/index.md b/docs/pre-release/troubleshooting/index.md deleted file mode 100644 index 21ff5405..00000000 --- a/docs/pre-release/troubleshooting/index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an intercept did not generate a preview URL - -Preview URLs can only be created if Telepresence is [logged in to -Ambassador Cloud](../reference/client/login/). When not logged in, it -will not even try to create a preview URL (additionally, by default it -will intercept all traffic rather than just a subset of the traffic). -Remove the intercept with `telepresence leave [deployment name]`, run -`telepresence login` to login to Ambassador Cloud, then recreate the -intercept. See the [intercepts how-to doc](../howtos/intercepts) for -more details. - -## Error on accessing preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on accessing preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Connecting to a cluster via VPN doesn't work. - -There are a few different issues that could arise when working with a VPN. Please see the [dedicated page](../reference/vpn) on Telepresence and VPNs to learn more on how to fix these. - -## Your GitHub organization isn't listed - -Ambassador Cloud needs access granted to your GitHub organization as a -third-party OAuth app. If an organization isn't listed during login -then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → -**Settings** → **Applications** → **Authorized OAuth Apps** → -**Ambassador Labs**. An organization owner will have a **Grant** -button, anyone not an owner will have **Request** which sends an email -to the owner. 
If an access request has been denied in the past, the -user will not see the **Request** button; they will have to reach out -to the owner. - -Once access is granted, log out of Ambassador Cloud and log back in; -you should see the GitHub organization listed. - -The organization owner can go to the **GitHub menu** → **Your -organizations** → **[org name]** → **Settings** → **Third-party -access** to see if Ambassador Labs has access already or authorize a -request for access (only owners will see **Settings** on the -organization page). Clicking the pencil icon will show the -permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or requesting access on initial login - -When using GitHub as your identity provider, the first time you log in -to Ambassador Cloud, GitHub will ask you to authorize Ambassador Labs to -access your organizations and certain user data. - - - -Any listed organization with a green check has already granted access -to Ambassador Labs (you still need to authorize to allow Ambassador -Labs to read your user data and organization membership). - -Any organization with a red "X" requires access to be granted to -Ambassador Labs. Owners of the organization will see a **Grant** -button. Anyone who is not an owner will see a **Request** button. -This will send an email to the organization owner requesting approval -to access the organization. If an access request has been denied in -the past, the user will not see the **Request** button; they will have -to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud -and then back in to select the organization. - -### Volume mounts are not working on macOS - -It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. Lately there have been some issues using `brew install sshfs` on a macOS workstation because the required component `osxfuse` (now named `macfuse`) isn't open source and hence is no longer supported. As a workaround, you can now use `gromgit/fuse/sshfs-mac` instead. Follow these steps: - -1. Remove old sshfs, macfuse, osxfuse using `brew uninstall` -2. `brew install --cask macfuse` -3. `brew install gromgit/fuse/sshfs-mac` -4. `brew link --overwrite sshfs-mac` - -Now `sshfs -V` shows you the correct version, e.g.: -``` -$ sshfs -V -SSHFS version 2.10 -FUSE library version: 2.9.9 -fuse: no mount point -``` - -One more thing must be done before it works correctly: -5. Try a mount (or an intercept that performs a mount). It will fail because you need to give permission to “Benjamin Fleischer” to execute a kernel extension (a pop-up appears that takes you to the system preferences). -6. Approve the needed permission -7. Reboot your computer. diff --git a/docs/pre-release/tutorial.md b/docs/pre-release/tutorial.md deleted file mode 100644 index 77b55591..00000000 --- a/docs/pre-release/tutorial.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging."
---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for -your OS to install it and log in to Ambassador Cloud in your browser. -Follow the prompts to log in with GitHub then select your -organization. You will be redirected to the Ambassador Cloud -dashboard; later you will manage your preview URLs here. - -### macOS - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -### Linux - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. 
- - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip>/. Be sure you use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
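If you would rather verify the deployment from the terminal before opening a browser, a minimal check might look like the sketch below. It assumes the load balancer exposes an IP address (as returned by the `kubectl get service` command above) rather than a hostname; the variable name is only illustrative.

```
# Capture the external IP of the Edge Stack load balancer (assumes an IP, not a hostname)
LB_IP=$(kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

# Fetch the landing page over plain HTTP; an HTTP 200 response means the web app is reachable
curl -i "http://${LB_IP}/"
```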
- -## Developing with Telepresence - -Now that your app is all wired up, you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change to the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but for more complex apps with many more dependencies it isn't feasible to run everything on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code and then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services against code running locally, without needing to run dependent services or redeploy code updates to your cluster on every change. It will also generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and - logging in to Ambassador Cloud. The Cloud dashboard is used to - manage your intercepts and share them with colleagues. You must be - logged in to create personal intercepts as we are going to do here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal, run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app, passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window, start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires that you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`.
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom of the page informing you that you are viewing a preview URL, with your name and org name.
- - - - - - -
  1. Switch back in your browser to the dashboard page and refresh it to see your preview URL listed. Click the box to expand out options where you can disable authentication or remove the preview.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/pre-release/versions.yml b/docs/pre-release/versions.yml deleted file mode 100644 index a8bf8c43..00000000 --- a/docs/pre-release/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.5.8" -dlVersion: "latest" -docsVersion: "2.5" -branch: release/v2 -productName: "Telepresence" diff --git a/docs/v1/discussion/how-it-works.md b/docs/v1/discussion/how-it-works.md deleted file mode 100644 index 5aa3e0bc..00000000 --- a/docs/v1/discussion/how-it-works.md +++ /dev/null @@ -1,102 +0,0 @@ -# How it works - -### Goals - -Our goals for Telepresence are: - -1. **Transparency:** make the local proxied process match the Kubernetes environment as closely as possible. -2. **Isolation:** only the proxied process has its environment modified. - This goal is much like that of a container: an isolated process-specific environment. -3. **Cross-platform:** Linux and macOS work the same way, when possible. - Linux provides far more capabilities (mount namespaces, bind mounts, network namespaces) than macOS. -4. **Compatibility:** works with any program. - -Achieving all these goals at the same time is not always possible. -We've therefore chosen to support more than one method of proxying, with the different methods each having its own [benefits and limitations](/reference/methods.html). - -### How it works - -Telepresence works by building a two-way network proxy (bootstrapped using `kubectl port-forward` or `oc port-forward`) between a custom pod running inside a remote (or local) Kubernetes cluster and a process running on your development machine. -The custom pod is substituted for your normal pod that would run in production. -Typically you'd want to do this to a testing or staging cluster, not your production cluster. - -Environment variables from the remote pod are made available to your local process. -In addition, the local process has its networking transparently overridden such that DNS calls and TCP connections are routed over the proxy to the remote Kubernetes cluster. 
-This happens one of two ways: - -* When using `--method vpn-tcp`, the default, a VPN-like tunnel is created using a program called [sshuttle](http://sshuttle.readthedocs.io/), which tunnels the packets over the SSH connection, and forwards DNS queries to a DNS proxy in the cluster. -* When using `--method inject-tcp` this is implemented using `LD_PRELOAD`/`DYLD_INSERT_LIBRARIES` mechanism on Linux/OSX, where a shared library can be injected into a process and override library calls. - In particular, it overrides DNS resolution and TCP connection and routes them via a SOCKS proxy to the cluster. - We wrote [a blog post](https://www.getambassador.io/resources/code-injection-on-linux-and-macos/) explaining LD_PRELOAD in more detail. - -(Technically there is a third method, for proxying containers, that is based on the `vpn-tcp` method.) - -Volumes are proxied using [sshfs](https://github.com/libfuse/sshfs), with their location available to the container as an environment variable. - -The result is that your local process has a similar environment to the remote Kubernetes cluster, while still being fully under your local control. - -### vpn-tcp method in detail - -Telepresence figures out the CIDR for Kubernetes Pods and Services, and any cloud hosts specified with `--also-proxy`, and tells `sshuttle` to forward traffic to those IPs via the proxy Pod running in Kubernetes. -`sshuttle` effectively acts as a VPN for these packets. - -For DNS the implementation is more complex, since base `sshuttle` is insufficient. -`sshuttle` will capture all packets going to your default nameservers, and instead forward them to a custom DNS server running inside the Telepresence Pod in the Kubernetes cluster. -For now we'll assume this is a remote cluster. -Some examples can help explain the process. - -Whenever you do a DNS lookup your DNS client library may add a suffix, try to resolve that, and if that fails try the original hostname you provided. -For example, if you're working at the offices of Example.com then the DHCP server in your office may tell clients to add `example.com` to DNS lookups. -Thus, when you lookup the domain `myservice` your DNS client will first try `myservice.example.com` and then `myservice` if that doesn't work. - -On startup: - -1. `telepresence` does a lookup of `hellotelepresence`. -2. Your DNS client library turns this into a lookup of `hellotelepresence.example.com`. -3. `sshuttle` forwards this to the custom Telepresence DNS server. -4. The Telepresence DNS server recognizes the `hellotelepresence` marker, and so now it knows the suffix it needs to filter is `example.com`. - -Next let's say you do: - -```console -$ curl http://myservice:8080 -``` - -inside a Telepresence-proxied shell. - -1. Your DNS client library turns this into a lookup of `myservice.example.com`. -2. `sshuttle` forwards this to the custom Telepresence DNS server. -3. The custom Telepresence DNS server strips off `example.com`, and does a local DNS lookup of `myservice`. -4. The Kubernetes DNS server replies with the IP of the `myservice` Service. -5. The custom Telepresence DNS server hands this back, `sshuttle` forwards it back, and eventually `curl` gets the Service IP. -6. `curl` opens connection to that IP, `sshuttle` forwards it to the Kubernetes cluster. - -#### minikube/minishift and Docker Desktop - -There is an additional complication when running a cluster locally in a VM, using something like minikube, minishift, or Docker Desktop. -Let's say you lookup `google.com`. - -1. 
`sshuttle` forwards `google.com` to Kubernetes (via Telepresence DNS server). -2. Kubernetes DNS server doesn't know about any such Service, so it does normal DNS lookup. -3. The normal DNS lookup might get routed via the host machine. -4. `sshuttle` captures all DNS lookups going from the host machine. -5. Your DNS lookup is now in an infinite loop. - -To solve this Telepresence will detect minikube, minishift, and Docker Desktop. -When it does, the Telepresence DNS server will forward DNS requests that aren't Kubernetes-specific to an external DNS server that is different than the ones your host machine is using. -E.g. it might use Google's public DNS if your host isn't. -As a result these DNS lookups aren't captured by `sshuttle` and the infinite loop is prevented. - -### inject-tcp method in detail - -A custom SOCKS proxy is run on the Kubernetes pod, which uses [Tor's extended SOCKSv5 protocol](https://gitweb.torproject.org/torsocks.git/tree/doc/socks/socks-extensions.txt) which adds support for DNS lookups. -`kubectl port-forward` creates a tunnel to this SOCKS proxy. - -A subprocess is then run using `torsocks`, a library that uses `LD_PRELOAD` to override TCP and (most) DNS lookups so that they get routed via the SOCKSv5 proxy. - -### Docker method in detail - -Telepresence can also proxy a Docker container, using a variant on the `vpn-tcp` method: `sshuttle` is run inside a Docker container. - -The user's specified Docker container is then run using the same network namespace as the proxy container with `sshuttle`. -(In typical Docker usage, in contrast, each container gets its own separate network namespace by default.) diff --git a/docs/v1/discussion/overview.md b/docs/v1/discussion/overview.md deleted file mode 100644 index f00b7072..00000000 --- a/docs/v1/discussion/overview.md +++ /dev/null @@ -1,38 +0,0 @@ -# Introduction to Telepresence - -Telepresence is an open source tool that lets you run a single service locally, while connecting that service to a remote Kubernetes cluster. This lets developers working on multi-service applications to: - -1. Do fast local development of a single service, even if that service depends on other services in your cluster. Make a change to your service, save, and you can immediately see the new service in action. - -2. Use any tool installed locally to test/debug/edit your service. For example, you can use a debugger or IDE! - -3. Make your local development machine operate as if it's part of your Kubernetes cluster. If you've got an application on your machine that you want to run against a service in the cluster -- it's easy to do. - -Telepresence works on both Mac OS X and Linux, with [OS-native packages](/reference/install.html). - -## How it works - -Telepresence deploys a two-way network proxy in a pod running in your Kubernetes cluster. This pod proxies data from your Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. The local process has its networking transparently overridden so that DNS calls and TCP connections are routed over the proxy to the remote Kubernetes cluster. - -This approach gives: - -* your local service full access to other services in the remote cluster -* your local service full access to Kubernetes environment variables, secrets, and ConfigMap -* your remote services full access to your local service - -How Telepresence works is discussed in more detail [here](/discussion/how-it-works.html). 
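-
-As a small, hypothetical illustration of that transparency (assuming the cluster already has a Service named `myservice` listening on port 8080), a proxied shell started by Telepresence can reach the Service by its ordinary in-cluster DNS name:
-
-```console
-$ telepresence --run-shell
-@yourcluster|$ curl http://myservice:8080/
-```
-
-Outside the Telepresence session the same `curl` would fail, because `myservice` only resolves inside the cluster.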
- -## Alternatives to Telepresence - -Typical alternatives to Telepresence include: - -* running your entire multi-service application locally via Docker Compose. This gives you a fast dev/debug cycle. However, it's less realistic since you're not running your services actually inside Kubernetes, and there are cloud services you might use (e.g., a database) that might not be easy to use locally. -* minikube. You can't do live coding/debugging with minikube by itself, but you can with Telepresence. The two work well together. -* run everything in a remote Kubernetes cluster. Again, you can't do live coding/debugging in a remote Kubernetes cluster ... but you can with Telepresence. - -## Getting started - -Telepresence offers a broad set of [proxying options](/reference/methods.html) which have different strengths and weaknesses. Generally speaking, we recommend you: - -* Start with the container method, which provides the most consistent environment for your code. Here is a [container quick start](/tutorials/docker.html). -* Use the VPN method, which lets you use an IDE or debugger with your code. Here is a [quick start that uses the VPN method](/tutorials/kubernetes-rapid.html) diff --git a/docs/v1/discussion/why-telepresence.md b/docs/v1/discussion/why-telepresence.md deleted file mode 100644 index ba97e7db..00000000 --- a/docs/v1/discussion/why-telepresence.md +++ /dev/null @@ -1,72 +0,0 @@ -# Why Telepresence? - -Let's assume you have a web service which listens on port 8080, and has a Dockerfile which gets built to an image called `examplecom/servicename`. -Your service depends on other Kubernetes `Service` instances (`thing1` and `thing2`), and on a cloud database. - -The Kubernetes staging environment looks like this: - -```mermaid -graph LR - subgraph Kubernetes in Cloud - code["k8s.Pod: servicename"] - s1["k8s.Service: servicename"]---code - code---s2["k8s.Service: thing1"] - code---s3["k8s.Service: thing2"] - code---c1>"Cloud Database (AWS RDS)"] - end -``` - -### The slow status quo - -If you need that cloud database and those two services to directly test your software, you will need to do the following to test a change: - -1. Change your code. -2. Build a Docker image. -3. Push the Docker image to a Docker registry in the cloud. -4. Update the staging Kubernetes cluster to use your new image. -5. Wait for the image to download. - -This is slow. - -```mermaid -graph TD - subgraph Laptop - code["Source code for servicename"]==>local["Docker image"] - kubectl - end - subgraph Kubernetes in Cloud - local==>registry["Docker registry"] - registry==>deployment["k8s.Deployment: servicename"] - kubectl==>deployment - s1["k8s.Service: servicename"]---deployment - deployment---s2["k8s.Service: thing1"] - deployment---s3["k8s.Service: thing2"] - deployment---c1>"Cloud Database (AWS RDS)"] - end -``` - -### A fast development cycle with Telepresence - -Telepresence works by running your code *locally*, as a normal local process, and then forwarding requests to/from the Kubernetes cluster. - -```mermaid -graph TD - subgraph Laptop - code["Source code for servicename"]==>local["local process"] - local---client[Telepresence client] - end - subgraph Kubernetes in Cloud - client-.-proxy["k8s.Pod: Telepresence proxy"] - s1["k8s.Service: servicename"]---proxy - proxy---s2["k8s.Service: thing1"] - proxy---s3["k8s.Service: thing2"] - proxy---c1>"Cloud Database (AWS RDS)"] - end -``` - -This means development is fast: you only have to change your code and restart your process. 
-Many web frameworks also do automatic code reload, in which case you won't even need to restart. - -### Other options - -For more discussion of the solution space for local development see the article [Development Environments for Microservices](https://dzone.com/articles/development-environments-for-microservices). diff --git a/docs/v1/doc-links.yml b/docs/v1/doc-links.yml deleted file mode 100644 index da2bd4a5..00000000 --- a/docs/v1/doc-links.yml +++ /dev/null @@ -1,79 +0,0 @@ - - title: Getting started - items: - - title: Introduction to Telepresence - link: discussion/overview - - title: Why Telepresence? - link: discussion/why-telepresence - - title: Installing Telepresence - link: reference/install - - title: Fast dev workflow with Docker/K8S - link: tutorials/docker - - - title: Tutorials - items: - - title: Debug a Kubernetes service locally - link: tutorials/kubernetes - - title: Connect to a remote Kubernetes cluster - link: tutorials/kubernetes-client - - title: Rapid development with Kubernetes - link: tutorials/kubernetes-rapid - - title: Get started with OpenShift - link: tutorials/openshift - - title: Minikube VPN access - link: tutorials/minikube-vpn - - title: Using a Kubernetes Client Library - link: tutorials/kubernetes-client-libs - - title: Using Telepresence with Golang - link: howto/golang - - title: Local Java development - link: tutorials/java - - title: Using Telepresence with IntelliJ - link: tutorials/intellij - - title: Using Telepresence with PHP - link: tutorials/php - - - title: Reference - items: - - title: Connecting to the cluster - link: reference/connecting - - title: Proxying methods - link: reference/methods - items: - - title: What gets proxied - link: reference/proxying - - title: Volume access - link: howto/volumes - - title: Running local processes - link: reference/running - - title: Generic limitations & workarounds - link: reference/limitations - - title: Windows support - link: reference/windows - - title: Usage Reporting - link: reference/usage_reporting - - title: How it works - link: discussion/how-it-works - - title: Upgrading Telepresence - link: reference/upgrade - - - title: Community - items: - - title: Development Guide - link: reference/developing - - title: Community - link: /community - - title: Related Projects - link: /related-projects - - title: Case Studies - link: /case-studies - - title: Changelog - link: reference/changelog - - - title: Need Help? - items: - - title: Ask on Slack - link: https://a8r.io/slack - - title: File a GitHub Issue - link: https://github.com/telepresenceio/telepresence/issues/new - - title: Visit Ambassador Labs - link: https://www.getambassador.io diff --git a/docs/v1/howto/golang.md b/docs/v1/howto/golang.md deleted file mode 100644 index 351da734..00000000 --- a/docs/v1/howto/golang.md +++ /dev/null @@ -1,48 +0,0 @@ -# Go support - -In this howto you'll learn how to use Telepresence with a Go program. -Because of the way Go is implemented you will need to use `--method vpn-tcp` with `telepresence`. - -### A Go program talking to Kubernetes - -First, start a web server inside Kubernetes and expose it via a service: - -```console -$ kubectl create deployment hello-world --image=datawire/hello-world -$ kubectl expose deployment hello-world --port=8000 -``` - -Next, install a neat little Go program called [wuzz](https://github.com/asciimoo/wuzz), an interactive HTTP client. 
- -```console -$ go get github.com/asciimoo/wuzz -``` - -Now we'll see how we can use wuzz to interact with a remote Kubernetes cluster. -`telepresence` will create a new `Deployment` inside Kubernetes that will act as a proxy, and then communication from the `wuzz` subprocess it runs will be forwarded to the cluster: - -```console -$ telepresence --run $GOPATH/bin/wuzz http://hello-world:8000/ -``` - -**Important:** Go programs will *not* work with `--method inject-tcp` option. - -The `wuzz` UI will appear with the URL `http://hello-world:8000/`. -Hit Enter and you should see the "Hello, World!" response from the Kubernetes service. -You can also interact with the Kubernetes API - change the URL to `https://kubernetes/` (but typically you'll have problems with the custom certificate authority.) - -### Kubernetes talks to a Go program - -You can also run a Go program as a local server and have requests to your Kubernetes `Deployment` forwarded to that process. -This is just the same as the example covered in [the tutorial](/tutorials/kubernetes.html) except that you use `--method vpn-tcp`, and run a Go process instead of a Python process. - -For example, if you have a `Deployment` called `myservice` running in Kubernetes and listening on port 8080, you can temporarily swap it out for a local process and have traffic forwarded to your laptop: - -```console -$ telepresence --swap-deployment myservice --expose 8080 \ - --run ./yourgoserver --port=8080 -``` - -Now requests to that remote `Deployment` will be routed to the `yourgoserver` process running on your machine. - -You can learn more about the differences between `--new-deployment` and `--swap-deployment` in the relevant [reference documentation](/reference/connecting.html). diff --git a/docs/v1/howto/volumes.md b/docs/v1/howto/volumes.md deleted file mode 100644 index 15dad293..00000000 --- a/docs/v1/howto/volumes.md +++ /dev/null @@ -1,119 +0,0 @@ -# Volume access - -Volume support requires a small amount of work on your part. -The root directory where all the volumes can be found will be set to the `TELEPRESENCE_ROOT` environment variable in the shell run by `telepresence`. -You will then need to use that env variable as the root for volume paths you are opening. - -Telepresence will attempt to gather the mount points that exist in the remote pod and list them in the `TELEPRESENCE_MOUNTS` environment variable, separated by `:` characters. -This allows automated discovery of remote volumes. - -For example, all Kubernetes containers have a volume mounted at `/var/run/secrets` with the service account details. -Those files are accessible from Telepresence: - -```console -$ telepresence -T: [...] -T: Setup complete. Launching your command. -@tel-testing|bash-3.2$ echo $TELEPRESENCE_ROOT -/tmp/tel-6cjjs3ba/fs -@tel-testing|bash-3.2$ echo $TELEPRESENCE_MOUNTS -/var/run/secrets/kubernetes.io/serviceaccount -@tel-testing|bash-3.2$ ls $TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io/serviceaccount/ -ca.crt namespace token -``` - -The files are available at a different path than they are on the actual Kubernetes environment. - -One way to deal with that is to change your application's code slightly. -For example, let's say you have a volume that mounts a file called `/app/secrets`. 
-Normally you would just open it in your code like so: - - -```python -secret_file = open("/app/secrets") -``` - -To support volume proxying by Telepresence, you will need to change your code, for example: - -```python -volume_root = "/" -if "TELEPRESENCE_ROOT" in os.environ: - volume_root = os.environ["TELEPRESENCE_ROOT"] -secret_file = open(os.path.join(volume_root, "app/secrets")) -``` - -By falling back to `/` when the environment variable is not set your code will continue to work in its normal Kubernetes setting. - -This approach is unavailable if you do not control the code that accesses the mounted filesystem, such as if you use a third-party library. -However, many such libraries offer configuration to work around this. -For example, the Java Kubernetes client library allows [configuration via environment variables](https://github.com/fabric8io/kubernetes-client#configuring-the-client). - -To simplify this process, Telepresence optionally lets you set the value of `TELEPRESENCE_ROOT` to a known path using the `--mount` option. -Using a known value as the mount point (e.g., `--mount=/tmp/tel_root`) can you let you configure your tools and libraries once and rely on that configuration continuing to work across multiple Telepresence sessions. -When using the container method, the `--mount` option allows bind-mounting portions of the remote filesystem directly onto the usual paths. - -For example, the `kubectl` command expects to find Kubernetes API service account credentials in `/var/run/secrets`. -This example shows `kubectl` successfully talking to the cluster while running in a local container: - -```shell -$ telepresence --mount=/tmp/known --docker-run --rm -it -v=/tmp/known/var/run/secrets:/var/run/secrets lachlanevenson/k8s-kubectl version --short -Volumes are rooted at $TELEPRESENCE_ROOT. See https://telepresence.io/howto/volumes.html for details. - -Client Version: v1.10.1 -Server Version: v1.7.14-gke.1 -``` - -Another way you can do this is by using the [proot](http://proot-me.github.io/) utility on Linux, which allows you to do fake bind mounts without being root. -For example, presuming you've installed `proot` (`apt install proot` on Ubuntu), in the following example we bind `$TELEPRESENCE_ROOT/var/run/secrets` to `/var/run/secrets`. -That means code doesn't need to be modified as the paths are in the expected location: - -```console -@minikube|$ proot -b $TELEPRESENCE_ROOT/var/run/secrets/:/var/run/secrets bash -$ ls /var/run/secrets/kubernetes.io/serviceaccount/ -ca.crt namespace token -``` - -Using the `TELEPRESENCE_MOUNTS` environment variable allows for automatic discovery and handling of mount points. -For example, the following Python code will create symlinks so that any mounted volumes appear in their normal locations: - -```python -def telepresence_remote_mounts(): - mounts = os.environ.get('TELEPRESENCE_MOUNTS') - if not mounts: - return - - tel_root = os.environ.get('TELEPRESENCE_ROOT') - - for mount in mounts.split(':'): - dir_name, link_name = os.path.split(mount) - os.makedirs(dir_name, exist_ok=True) - - link_src = os.path.join(tel_root, mount[1:]) - os.symlink(link_src, mount) -``` - -## Volume access via Docker volume for the container method - -It is possible to use the [vieux/sshfs](https://github.com/vieux/docker-volume-sshfs) volume driver to access remote volumes if you are using the container method. -This makes it possible to use volumes even if you don't have mount privileges or capabilities on your main system, e.g. on Windows. 
-Use the `--docker-mount` option to specify an absolute path for volume mounts inside your container.
-Note that `--docker-mount` is mutually exclusive with `--mount` and is available only with the container method.
-
-When `--docker-mount` is specified, instead of using `sshfs` to mount the remote filesystem on your host, a randomly named Docker volume is created using the vieux/sshfs volume driver.
-The volume is mounted in the user container at the mount point specified by `--docker-mount`.
-As a side benefit, `sudo` is no longer required for the container method (unless you need `sudo` to run Docker).
-
-One downside is that it is not possible to mount a subdirectory of the remote volumes at another location in your container.
-This means that the `/var/run/secrets` workaround described above cannot be done with `--docker-mount`.
-Using `$TELEPRESENCE_ROOT` is required.
-
-
-## Ensuring write permissions
-
-If you are experiencing errors such as `Operation not permitted` or `permission denied`, your processes might lack write permissions.
-This happens because the volume mounted by Telepresence is owned by a privileged user (root, admin, etc.), whereas your image might not have the same privileges.
-
-Since Telepresence doesn't allow you to explicitly specify use of the privileged image, as a workaround you can expose a low port (i.e. one below 1024): `--expose 9999:81`.
-This works because binding a low port requires privileged network access; Telepresence detects this and chooses the privileged image, causing all the proxy pod processes to run as root.
-
-
diff --git a/docs/v1/macros.js b/docs/v1/macros.js
deleted file mode 100644
index 3cbe66f1..00000000
--- a/docs/v1/macros.js
+++ /dev/null
@@ -1,240 +0,0 @@
-import React from 'react';
-
-import Markdown from '@src/components/Markdown';
-
-export function Install({install, command, cluster, location}) {
-  return (
-      <h4>Install Telepresence with Homebrew/apt/dnf</h4>
-      <p>You will need the following available on your machine:</p>
-      <ul>
-        <li>{ command } command line tool (here's the installation instructions).</li>
-        <li>Access to your { cluster } cluster, with local credentials on your machine. You can test this by running { command } get pod - if this works you're all set.</li>
-      </ul>
- ); -} - -export function InstallSpecific({location}) { - return ( - {` -#### OS X - -On OS X you can install Telepresence by running the following: - -\`\`\`shell -brew install --cask osxfuse -brew install datawire/blackbird/telepresence-legacy -\`\`\` - -#### Ubuntu 16.04 or later - -Run the following to install Telepresence: - -\`\`\`shell -curl -s https://packagecloud.io/install/repositories/datawireio/telepresence/script.deb.sh | sudo bash -sudo apt install --no-install-recommends telepresence -\`\`\` - -If you are running another Debian-based distribution that has Python 3.5 installable as \`python3\`, you may be able to use the Ubuntu 16.04 (Xenial) packages. The following works on Linux Mint 18.2 (Sonya) and Debian 9 (Stretch) by forcing the PackageCloud installer to access Xenial packages. - -\`\`\`shell -curl -sO https://packagecloud.io/install/repositories/datawireio/telepresence/script.deb.sh -sudo env os=ubuntu dist=xenial bash script.deb.sh -sudo apt install --no-install-recommends telepresence -rm script.deb.sh -\`\`\` - -A similar approach may work on Debian-based distributions with Python 3.6 by using the Ubuntu 17.10 (Artful) packages. - -#### Fedora 26 or later - -Run the following: - -\`\`\`shell -curl -s https://packagecloud.io/install/repositories/datawireio/telepresence/script.rpm.sh | sudo bash -sudo dnf install telepresence -\`\`\` - -If you are running a Fedora-based distribution that has Python 3.6 installable as \`python3\`, you may be able to use Fedora packages. See the Ubuntu section above for information on how to invoke the PackageCloud installer script to force OS and distribution. - -#### Arch Linux - -Until we have a *correct and working* AUR package, please install from source. See [issue #135](https://github.com/telepresenceio/telepresence/issues/135) for the latest information. - -#### Windows - -See the [Windows support documentation](/reference/windows.html). - -#### Install from source - -On systems with Python 3.5 or newer, install into \`/usr/local/share/telepresence\` and \`/usr/local/bin\` by running: - -\`\`\`shell -sudo env PREFIX=/usr/local ./install.sh -\`\`\` - -Install the software from the [list of dependencies](/reference/install.html#dependencies) to finish. - -Install into arbitrary locations by setting other environment variables before calling the install script. [See the install script](https://github.com/telepresenceio/telepresence/blob/master/install.sh) for more information. After installation you can safely delete the source code. - -#### Other platforms - -Don't see your favorite platform? [Let us know](https://github.com/telepresenceio/telepresence/issues/new) and we'll try to add it. Also try installing from source. -`} - ); -} - -export function GettingStartedPart1({cluster}) { - return ( - {` -### Debugging a service locally with Telepresence - -Imagine you have a service running in a staging cluster, and someone reports a bug against it. -In order to figure out the problem you want to run the service locally... but the service depends on other services in the cluster, and perhaps on cloud resources like a database. - -In this tutorial you'll see how Telepresence allows you to debug your service locally. -We'll use the \`telepresence\` command line tool to swap out the version running in the staging cluster for a debug version under your control running on your local machine. -Telepresence will then forward traffic from ${ cluster } to the local process. 
-`} - ); -} - -export function GettingStartedPart2({deployment, command, cluster}) { - return ( - {` -Once you know the address you can store its value (don't forget to replace this with the real address!): - -\`\`\`console -$ export HELLOWORLD=http://104.197.103.13:8000 -\`\`\` - -And you send it a query and it will be served by the code running in your cluster: - -\`\`\`console -$ curl $HELLOWORLD/ -Hello, world! -\`\`\` - -#### Swapping your deployment with Telepresence - -**Important:** Starting \`telepresence\` the first time may take a little while, since ${ cluster } needs to download the server-side image. - -At this point you want to switch to developing the service locally, replace the version running on your cluster with a custom version running on your laptop. -To simplify the example we'll just use a simple HTTP server that will run locally on your laptop: - -\`\`\`console -$ mkdir /tmp/telepresence-test -$ cd /tmp/telepresence-test -$ echo "hello from your laptop" > file.txt -$ python3 -m http.server 8001 & -[1] 2324 -$ curl http://localhost:8001/file.txt -hello from your laptop -$ kill %1 -\`\`\` - -We want to expose this local process so that it gets traffic from ${ cluster }, replacing the existing \`hello-world\` deployment. - -**Important:** you're about to expose a web server on your laptop to the Internet. -This is pretty cool, but also pretty dangerous! -Make sure there are no files in the current directory that you don't want shared with the whole world. - -Here's how you should run \`telepresence\` (you should make sure you're still in the \`/tmp/telepresence-test\` directory you created above): - -\`\`\`console -$ cd /tmp/telepresence-test -$ telepresence --swap-deployment hello-world --expose 8000 \ ---run python3 -m http.server 8000 & -\`\`\` - -This does three things: - -* Starts a VPN-like process that sends queries to the appropriate DNS and IP ranges to the cluster. -* \`--swap-deployment\` tells Telepresence to replace the existing \`hello-world\` pod with one running the Telepresence proxy. On exit, the old pod will be restored. -* \`--run\` tells Telepresence to run the local web server and hook it up to the networking proxy. - -As long as you leave the HTTP server running inside \`telepresence\` it will be accessible from inside the ${ cluster } cluster. -You've gone from this... - -\`\`\`mermaid -graph RL -subgraph ${ cluster } in Cloud -server["datawire/hello-world server on port 8000"] -end -\`\`\` - -...to this: - -\`\`\`mermaid -graph RL -subgraph Laptop -code["python HTTP server on port 8000"]---client[Telepresence client] -end -subgraph ${ cluster } in Cloud -client-.-proxy["Telepresence proxy, listening on port 8000"] -end -\`\`\` - -We can now send queries via the public address of the \`Service\` we created, and they'll hit the web server running on your laptop instead of the original code that was running there before. 
-Wait a few seconds for the Telepresence proxy to startup; you can check its status by doing: - -\`\`\`console -$ ${ command } get pod | grep hello-world -hello-world-2169952455-874dd 1/1 Running 0 1m -hello-world-3842688117-0bzzv 1/1 Terminating 0 4m -\`\`\` - -Once you see that the new pod is in \`Running\` state you can use the new proxy to connect to the web server on your laptop: - -\`\`\`console -$ curl $HELLOWORLD/file.txt -hello from your laptop -\`\`\` - -Finally, let's kill Telepresence locally so you don't have to worry about other people accessing your local web server by bringing it to the foreground and hitting Ctrl-C: - -\`\`\`console -$ fg -telepresence --swap-deployment hello-world --expose 8000 --run python3 -m http.server 8000 -^C -Keyboard interrupt received, exiting. -\`\`\` - -Now if we wait a few seconds the old code will be swapped back in. -Again, you can check status of swap back by running: - -\`\`\`console -$ ${ command } get pod | grep hello-world -\`\`\` - -When the new pod is back to \`Running\` state you can see that everything is back to normal: - -\`\`\`console -$ curl $HELLOWORLD/file.txt -Hello, world! -\`\`\` - ----- - -> **What you've learned:** Telepresence lets you replace an existing deployment with a proxy that reroutes traffic to a local process on your machine. -> This allows you to easily debug issues by running your code locally, while still giving your local process full access to your staging or testing cluster. - ----- - -Now it's time to clean up the service: -`} - ); -} - -export function TutorialFooter({title, path, baseUrl}) { - return ( - {` -**Still have questions? Ask in our [Slack chatroom](https://a8r.io/slack) or [file an issue on GitHub](https://github.com/telepresenceio/telepresence/issues/new).** -`} - ); -} diff --git a/docs/v1/redirects.yml b/docs/v1/redirects.yml deleted file mode 100644 index 62ecfe61..00000000 --- a/docs/v1/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "discussion/overview"} diff --git a/docs/v1/reference/changelog.md b/docs/v1/reference/changelog.md deleted file mode 100644 index 8f7570da..00000000 --- a/docs/v1/reference/changelog.md +++ /dev/null @@ -1,1114 +0,0 @@ -# Changelog - - - -#### Announcement: Telepresence 2 is now Available -Telepresence 2 is open source and available to [use now](/docs/latest/). Rewritten in Go to be faster and more resilient, Telepresence 2 has a [new architecture](/docs/latest/reference/architecture/) to better serve Kubernetes development teams with multiple users. [Try it today](/docs/latest/quick-start/) and [share your feedback](https://a8r.io/slack) with the team. - -#### 0.109 (January 25, 2021) - -Bug fixes: - -* When using the vpn-tcp method on Linux, the initial `iptables` check will not hang on DNS lookups. - Thanks to Peter Janes for the patch. - ([#1476](https://github.com/telepresenceio/telepresence/issues/1476)) -* The swap deployment operation no longer fails on Deployments that have startup probes configured. - Thanks to GitHub user deicon and Anton Troshin for the patch. - ([#1479](https://github.com/telepresenceio/telepresence/issues/1479)) - -Misc: - -* No longer provide pre-built packages for Ubuntu 19.10 Eoan Ermine. - -#### 0.108 (September 10, 2020) - -Bug fixes: - -* When swapping a Deployment, Telepresence correctly sets annotations and other metadata in the proxy Pod. 
- ([#1430](https://github.com/telepresenceio/telepresence/issues/1430)) - -#### 0.107 (August 29, 2020) - -Features: - -* The Telepresence proxy runs in a bare Pod rather than one managed by a Deployment. - If you experience problems, **please file an issue**, and set the environment variable `TELEPRESENCE_USE_DEPLOYMENT` to any non-empty value to force the old behavior. - Thanks to Maru Newby and Vladimir Kulev for early bug fixes. - ([#1041](https://github.com/telepresenceio/telepresence/issues/1041)) -* When using the vpn-tcp method with a local cluster, Telepresence now supports resolving additional names via the Telepresence proxy. - This makes it possible to properly handle DNS resolution for Minikube ingresses. - Thanks to Vladimir Kulev for the patch. - ([#1385](https://github.com/telepresenceio/telepresence/issues/1385)) - -Bug fixes: - -* Telepresence now automatically exposes ports defined in the Deployment/DeploymentConfig when using an existing Deployment. - Telepresence already did this when swapping deployments. - Thanks to Aslak Knutsen for the patch. - ([#1356](https://github.com/telepresenceio/telepresence/issues/1356)) -* When running on Windows Subsystem for Linux, Telepresence chooses a temporary directory that exists on Debian/Ubuntu WSL installations. - It falls back to the old value if `/mnt/c` is not available. - Thanks to Mark Lee for the patch. - ([#1176](https://github.com/telepresenceio/telepresence/issues/1176)) -* Telepresence avoids proxying the external IP of a local cluster when using the vpn-tcp method. - Thanks to Vladimir Kulev for the patch. - ([#1380](https://github.com/telepresenceio/telepresence/issues/1380)) -* Telepresence avoids generating global networks when guessing Pod and Service networks for the vpn-tcp method. - Thanks to Israël Hallé for the patch. - ([#1420](https://github.com/telepresenceio/telepresence/issues/1420)) - -#### 0.105 (April 24, 2020) - -Features: - -* The `TELEPRESENCE_USE_OCP_IMAGE` environment variable can be set to `YES` or `NO` to require or disallow use of Telepresence's OCP-specific proxy image, or to `AUTO` to let Telepresence decide as before. - Thanks to Maru Newby for the patch. -* When performing a swap deployment operation with the container method, host entries are reflected in the local container. - Thanks to Charlie Catney for the patch. - ([#1097](https://github.com/telepresenceio/telepresence/issues/1097)) -* When using the vpn-tcp method, DNS resolution of names of the form xxx.yyy.zzz.svc is supported. - This is required to handle Strimzi kafka auto-generated addresses for - example (see strimzi/strimzi-kafka-operator#2656). - Thanks to Aurelien Grenotton for the patch. - ([#560](https://github.com/telepresenceio/telepresence/issues/560)) - -Bug fixes: - -* Telepresence creates new deployments using `kubectl create` rather than `kubectl run`. This allows the new deployment operation to succeed with `kubectl` version 1.18 and later. - Thanks to Maru Newby for the patch. - ([#1297](https://github.com/telepresenceio/telepresence/issues/1297)) -* The vpn-tcp method uses an even more robust heuristic to determine the Pod IP space. - Thanks to Maru Newby for the patch. - ([#1201](https://github.com/telepresenceio/telepresence/issues/1201)) - -Misc: - -* Documentation for using Kubernetes client libaries has been expanded. - Thanks to Guray Yildirim for the patch. - ([#1245](https://github.com/telepresenceio/telepresence/issues/1245)) -* Telepresence has native packages for Fedora 31 and Ubuntu Eoan. 
- Packages for even newer releases will be available once our provider supports them. - ([#1236](https://github.com/telepresenceio/telepresence/issues/1236)) -* Telepresence is no longer packaged for Ubuntu 18.10 (Cosmic Cuttlefish) or Ubuntu 19.04 (Disco Dingo) as those releases have [reached end of life](https://wiki.ubuntu.com/Releases#End_of_Life). - -#### 0.104 (January 23, 2020) - -Bug fixes: - -* Using `--also-proxy` proxies all resolved IPs for a hostname. - Thanks to Markus Maga for the patch. - ([#379](https://github.com/telepresenceio/telepresence/issues/379)) -* The context specified at the command line is used with all startup operations. - Thanks to Bret Palsson for the patch. - ([#1190](https://github.com/telepresenceio/telepresence/issues/1190)) -* The vpn-tcp method uses a more robust heuristic to determine the Pod IP space. - Thanks to Simon Trigona for the patch. - ([#1201](https://github.com/telepresenceio/telepresence/issues/1201)) - -#### 0.103 (October 30, 2019) - -Backwards incompatible changes: - -* Telepresence uses a new OpenShift-specific proxy image when it detects an OpenShift cluster. - It should no longer be necessary to modify OpenShift cluster policies to allow Telepresence to run. - The OpenShift proxy image is based on CentOS Linux (instead of Alpine), which means it is significantly larger than the Kubernetes proxy image, so you may notice additional startup latency. - Use of a CentOS base image should allow for easier approval or certification in some enterprise environments. - Thanks to GitHub user ReSearchITEng for the patch. - ([#1114](https://github.com/telepresenceio/telepresence/pull/1114)) -* Telepresence uses a new strategy to detect an OpenShift cluster. - If `openshift.io` is present in the output of `kubectl api-versions`, Telepresence treats the cluster as OpenShift. - It prefers `oc` over `kubectl` and uses the OpenShift-specific image as above. - Thanks to Bartosz Majsak for the original patch; blame to the Ambassador Labs team for errors in the ultimate implementation. - ([#1139](https://github.com/telepresenceio/telepresence/issues/1139)) - -Features: - -* Telepresence supports forwarding traffic to and from other containers in the pod. This is useful to connect to proxy/helper containers (`--to-pod`) and to use adapters/agents sending traffic to your app (`--from-pod`). - ([#728](https://github.com/telepresenceio/telepresence/issues/728)) -* When using the `vpn-tcp` method on MacOS, Telepresence will flush the DNS cache once connected. - This can be useful to clear cached NXDOMAIN responses that were populated when Telepresence was not yet connected. - Thanks to Karolis Labrencis for the patch. - ([#1118](https://github.com/telepresenceio/telepresence/issues/1118)) - -Bug fixes: - -* On WSL (Windows 10), Telepresence uses a Docker-accessible directory as the temporary dir. - Thanks to Shawn Dellysse for the patch. - ([#1148](https://github.com/telepresenceio/telepresence/issues/1148)) -* The connectivity test for the inject-tcp method is able to succeed in more cluster configurations. - The test hostname is now `kubernetes.default` instead of `kubernetes.default.svc.cluster.local`. - Thanks to Mohammad Teimori Pabandi for the patch. - ([#1141](https://github.com/telepresenceio/telepresence/issues/1141)) - ([#1161](https://github.com/telepresenceio/telepresence/issues/1161)) - -#### 0.102 (October 2, 2019) - -Features: - -* You can set the Kubernetes service account for new and swapped deployments using the `--serviceaccount` option. 
- Thanks to Bill McMath and Dmitry Bazhal for the patches. - ([#1093](https://github.com/telepresenceio/telepresence/issues/1093)) -* When using the container method, you can forward container ports to the host machine. - This can be useful to allow code running in your container to connect to an IDE or debugger running on the host. - ([#1022](https://github.com/telepresenceio/telepresence/issues/1022)) -* When using the container method, Telepresence can use a Docker volume to mount remote volumes. - This makes it possible to use volumes even if you don't have mount privileges or capabilities on your main system, e.g. in a container. - See [the documentation](../../howto/volumes#volume-access-via-docker-volume-for-the-container-method) for more about the new `--docker-mount` feature. - This is Linux-only for the moment: [#1135](https://github.com/telepresenceio/telepresence/issues/1135). - Thanks to Sławomir Kwasiborski for the patch. -* When using the default `vpn-tcp` method, you can use the `--local-cluster` flag to bypass local cluster heuristics and force Telepresence to use its DNS loop avoidance workaround. -* Telepresence sets the `command` field when swapping a deployment. - Thanks to GitHub user netag for the patch. - - -Bug fixes: - -* When using the container method, Telepresence notices if the Docker daemon is not local and reports an error. - ([#873](https://github.com/telepresenceio/telepresence/issues/873)) -* Telepresence is somewhat more robust when working with a local cluster. - ([#1000](https://github.com/telepresenceio/telepresence/issues/1000)) -* Telepresence no longer crashes on `ssh` timeouts. - ([#1075](https://github.com/telepresenceio/telepresence/issues/1075)) -* The CPU limit for the Telepresence pod for new deployments is now 1, fixing performance degradation caused by CPU time throttling. - See https://github.com/kubernetes/kubernetes/issues/67577 and https://github.com/kubernetes/kubernetes/issues/51135 for more information. - Thanks to Zhuo Peng for the patch. - ([#1120](https://github.com/telepresenceio/telepresence/issues/1120)) - -Misc: - -* When using the `inject-tcp` method, Telepresence no longer tries to connect to google.com to check for connectivity. - Now it tries to connect to kubernetes.default.svc.cluster.local, which should be accessible in common cluster configurations. - Thanks to GitHub user ReSearchITEng for the patch. -* Telepresence detects an additional name for Docker for Desktop. - Thanks to William Austin for the patch. - -#### 0.101 (June 19, 2019) - -Bug fixes: - -* Telepresence once again exits when your process finishes. - ([#1052](https://github.com/telepresenceio/telepresence/issues/1052)) -* When using the vpn-tcp method in a container, Telepresence warns you if it is unable to use `iptables` due to missing capabilities, instead of crashing mysteriously much later. - ([#1054](https://github.com/telepresenceio/telepresence/issues/1054)) - -#### 0.100 (June 10, 2019) - -Features: - -* Telepresence can use an OpenShift DeploymentConfig with the `--deployment` option. - Thanks to Aslak Knutsen for the patch. - ([#1037](https://github.com/telepresenceio/telepresence/issues/1037)) - -Bug fixes: - -* The unprivileged proxy image switches to the intended UID when unexpectedly running as root. - This remedies the "unprotected key file" warning from `sshd` and the subsequent proxy pod crash seen by some users. 
- ([#1013](https://github.com/telepresenceio/telepresence/issues/1013)) -* Attaching a debugger to the process running under Telepresence no longer causes the session to end. - Thanks to Gigi Sayfan for the patch. - ([#1003](https://github.com/telepresenceio/telepresence/issues/1003)) - -Misc: - -* If you make a [pull request](https://github.com/telepresenceio/telepresence/pulls) on GitHub, unit tests and linters will run against your PR automatically. - We hope the quick automated feedback will be helpful. - Thank you for your contributions! - -#### 0.99 (April 17, 2019) - -Bug fixes: - -* Telepresence correctly forwards privileged ports when using swap-deployment. - ([#983](https://github.com/telepresenceio/telepresence/issues/983)) -* Telepresence once again operates correctly with large clusters. - ([#981](https://github.com/telepresenceio/telepresence/issues/981)) -* Telepresence no longer crashes when the `docker` command requires `sudo`. - ([#995](https://github.com/telepresenceio/telepresence/issues/995)) - -Misc: - -* Additional timeouts around DNS lookups should make Telepresence startup more reliable when using the default vpn-tcp method. - ([#986](https://github.com/telepresenceio/telepresence/issues/986)) -* When calling `sudo`, Telepresence offers a link to [documentation](../install#dependencies) about why elevated privileges are required. - ([#262](https://github.com/telepresenceio/telepresence/issues/262)) - -#### 0.98 (April 2, 2019) - -Features: - -* The `TELEPRESENCE_MOUNTS` environment variable contains a list of remote mount points. - See [the documentation](../../howto/volumes) for more information and example usage. - Thanks to GitHub user turettn for the patch. - ([#917](https://github.com/telepresenceio/telepresence/issues/917)) - -Bug fixes: - -* Telepresence no longer crashes when used with kubectl 1.14. - ([#966](https://github.com/telepresenceio/telepresence/issues/966)) -* Telepresence no longer quits if its `kubectl logs` subprocess quits. - ([#598](https://github.com/telepresenceio/telepresence/issues/598)) -* Telepresence waits until a deployment config on OpenShift is successfully rolled back to its original state before proceeding with further cleanup. - Thanks to Bartosz Majsak for the patch. - ([#929](https://github.com/telepresenceio/telepresence/issues/929)) -* Telepresence tries to detect Kubernetes running with `kind` (kube-in-docker) and work around networking issues the same way as for Minikube. - Thanks to Rohan Singh for the patch. - ([#932](https://github.com/telepresenceio/telepresence/issues/932)) -* Telepresence accepts private Docker registries as sources for required images when using `TELEPRESENCE_REGISTRY`. - Thanks to GitHub user arroo for the patch. - ([#967](https://github.com/telepresenceio/telepresence/issues/967)) -* Telepresence's container method supports non-standard cluster DNS search domains. - Thanks to Loïc Minaudier for the patch. - ([#940](https://github.com/telepresenceio/telepresence/pull/940)) - - -Misc: - -* Telepresence has a native package for the soon-to-be-released Ubuntu Dingo. -* Improved the [Java development documentation](../../tutorials/java#debugging-your-code) with updated Maven debug options for JDK versions 5-8. - Thanks to Sanha Lee for the patch. - ([#955](https://github.com/telepresenceio/telepresence/issues/955)) - -#### 0.97 (January 25, 2019) - -Backwards incompatible changes: - -* A successful Telepresence session now exits with the return code of your process. 
This should make it easier to use Telepresence in scripts. - ([#886](https://github.com/telepresenceio/telepresence/issues/886)) - -Bug fixes: - -* Telepresence should no longer crash if the terminal width is unavailable. - ([#901](https://github.com/telepresenceio/telepresence/issues/901)) -* The container method now outputs the same helpful text about which ports are exposed as the other methods do. - ([#235](https://github.com/telepresenceio/telepresence/issues/235)) -* Telepresence tries to detect Kubernetes in Docker Desktop and work around networking issues the same way as for Minikube. - Thanks to Rohan Singh for the patch. - ([#736](https://github.com/telepresenceio/telepresence/issues/736)) - -Misc: - -* Support for OpenShift has been brought up to date. - Thanks to Bartosz Majsak for the patch. -* Telepresence masks (hides) Kubernetes access tokens in the log file. - Previously, access tokens would be logged when running in verbose mode. - Thanks to Bartosz Majsak for the patch. - ([#889](https://github.com/telepresenceio/telepresence/issues/889)) -* Telepresence has native packages for the recently-released Fedora 29 and Ubuntu Cosmic. - ([#876](https://github.com/telepresenceio/telepresence/issues/876)) - -#### 0.96 (December 14, 2018) - -Bug fixes: - -* When using the container method, all outgoing traffic is directed to the cluster. - It is no longer necessary or meaningful to use `--also-proxy` with `--docker-run`. - ([#391](https://github.com/telepresenceio/telepresence/issues/391)) -* Telepresence shows more information when a background process dies unexpectedly, including the last few lines of output. - If this happens during startup, the output is included in the crash report. - ([#842](https://github.com/telepresenceio/telepresence/issues/842)) -* Telepresence is less likely to get confused by network setups that have IPv6 enabled. - ([#783](https://github.com/telepresenceio/telepresence/issues/783)) -* Telepresence outputs a warning if cluster and client versions differ greatly. - ([#426](https://github.com/telepresenceio/telepresence/issues/426)) -* Instead of crashing, Telepresence reports an error when - * the deployment named by `--deployment` does not exist. - ([#592](https://github.com/telepresenceio/telepresence/issues/592)) - * the deployment named by `--new-deployment` already exists. - ([#756](https://github.com/telepresenceio/telepresence/issues/756)) - * your command cannot be launched. - ([#869](https://github.com/telepresenceio/telepresence/issues/869)) - -#### 0.95 (December 6, 2018) - -Bug fixes: - -* Telepresence no longer ignores the context argument when checking whether the selected namespace exists. - ([#787](https://github.com/telepresenceio/telepresence/issues/787)) -* Telepresence functions in more restrictive cluster environments because the proxy pod no longer tries to modify the filesystem. - ([#848](https://github.com/telepresenceio/telepresence/issues/848)) -* When a background process dies unexpectedly, Telepresence will notice much sooner. - This is particularly helpful during session start, as Telepresence can sometimes avoid waiting through a long timeout before crashing. - ([#590](https://github.com/telepresenceio/telepresence/issues/590)) -* Cleanup of background processes is more robust to individual failures. 
- ([#586](https://github.com/telepresenceio/telepresence/issues/586)) - -Misc: - -* The container method no longer uses `ifconfig` magic and `socat` to connect the local container to the cluster, relying on `ssh` port forwarding instead. - If you've had trouble with Telepresence's use of the Docker bridge interface (typically `docker0` on Linux, unavailable on Windows), this change avoids all of that. - This is technically a breaking change, as ports 38022 and 38023 are used by the new machinery. - Those ports are now unavailable for user code. - In practice, most users should not notice a difference. - ([#726](https://github.com/telepresenceio/telepresence/issues/726)) -* The `./build` development script no longer exists. - Its functionality has been merged into the Makefile. - See `make help` for the new usage information. - ([#839](https://github.com/telepresenceio/telepresence/issues/839)) - - -#### 0.94 (November 12, 2018) - -Bug fixes: - -* Telepresence no longer crashes at launch for OpenShift/MiniShift users. Thanks to Tom Ellis for the patch. - ([#781](https://github.com/telepresenceio/telepresence/issues/781)) - -Misc: - -* When a new version is available, Telepresence will tell you at the end of the session. - ([#285](https://github.com/telepresenceio/telepresence/issues/285)) - -#### 0.93 (October 4, 2018) - -Bug fixes: - -* Telepresence reports an error message when the specified namespace is not found. - ([#330](https://github.com/telepresenceio/telepresence/issues/330)) -* The container method no longer crashes when no ports are exposed. - ([#750](https://github.com/telepresenceio/telepresence/issues/750)) - -Misc: - -* Telepresence detects that it is running as root and suggests the user not launch Telepresence under sudo if there is trouble talking to the cluster. - Thanks to Rohan Gupta for the patch. - ([#460](https://github.com/telepresenceio/telepresence/issues/460)) - -#### 0.92 (August 21, 2018) - -Bug fixes: - -* Fixed the `bash--norc` typo introduced in 0.91. - ([#738](https://github.com/telepresenceio/telepresence/issues/738)) - -#### 0.91 (August 17, 2018) - -Bug fixes: - -* Conntrack, iptables, and a few other dependencies are automatically found on more Linux distributions now. - ([#278](https://github.com/telepresenceio/telepresence/issues/278)) -* Telepresence no longer crashes in the presence of an empty or corrupted cache file. - ([#713](https://github.com/telepresenceio/telepresence/issues/713)) - -#### 0.90 (June 12, 2018) - -Bug fixes: - -* Fixed a regression in the Telepresence executable mode bits in packages. - ([#682](https://github.com/telepresenceio/telepresence/issues/682)) -* Fixed other packaging-related issues. - -#### 0.89 (June 11, 2018) - -Bug fixes: - -* When launching the user's container (when using the container method), if the `docker` command requires `sudo`, Telepresence now uses `sudo -E` to ensure that environment variables get passed through to the container. - This fixes a regression caused by the [fix for multi-line environment variables (#301)](https://github.com/telepresenceio/telepresence/issues/301). - ([#672](https://github.com/telepresenceio/telepresence/issues/672)) - -Misc: - -* Version number handling has been simplified. - ([#641](https://github.com/telepresenceio/telepresence/issues/641)) -* Linux packaging has been simplified. - ([#643](https://github.com/telepresenceio/telepresence/issues/643)) - -#### 0.88 (May 16, 2018) - -Features: - -* Various points in the Kubernetes stack have timeouts for idle connections. 
- This includes the Kubelet, the API server, or even an ELB that might be in front of everything. - Telepresence now avoids those timeouts by periodically sending data through its open connections. - In some cases, this will prevent sessions from ending abruptly due to a lost connection. - ([#573](https://github.com/telepresenceio/telepresence/issues/573)) - -#### 0.87 (May 11, 2018) - -Features: - -* Telepresence can optionally emit the remote environment as a JSON blob or as a `.env`-format file. - Use the `--env-json` or `--env-file` options respectively to specify the desired filenames. - See [https://docs.docker.com/compose/env-file/](https://docs.docker.com/compose/env-file/) for information about the limitations of the Docker Compose-style `.env` file format. - ([#608](https://github.com/telepresenceio/telepresence/issues/608)) - -Bug fixes: - -* Telepresence can now transfer complex environment variable values without truncating or mangling them. - ([#597](https://github.com/telepresenceio/telepresence/issues/597)) -* The container method now supports multi-line environment variable values. - ([#301](https://github.com/telepresenceio/telepresence/issues/301)) -* Telepresence avoids running afoul of lifecycle hooks when swapping a deployment. - ([#587](https://github.com/telepresenceio/telepresence/issues/587)) - -#### 0.86 (April 26, 2018) - -Misc: - -* By default, the Telepresence proxy runs in the cluster as an unprivileged user; as built, the proxy image gives up root privileges. - However, when the proxy needs to bind to privileged ports, it _must_ run as root. - Instead of using the security context feature of Kubernetes to gain root, Telepresence now uses a different image that does not give up root privileges. - This allows Telepresence to run in clusters that lock down the Kubernetes security context feature. - ([#617](https://github.com/telepresenceio/telepresence/issues/617)) - -#### 0.85 (April 23, 2018) - -Features: - -* You can set `$TELEPRESENCE_ROOT` to a known path using the `--mount=/known/path` argument. - See the [volumes documentation](../../howto/volumes) for example usage. - ([#454](https://github.com/telepresenceio/telepresence/issues/454)) -* Turn off volume support entirely with `--mount=false`. - ([#378](https://github.com/telepresenceio/telepresence/issues/378)) - -Bug fixes: - -* The swap-deployment operation works differently now. - - The original method saved a copy of the deployment manifest, deleted the deployment, and then created a deployment for Telepresence. - To clean up, it deleted the Telepresence deployment and recreated the original deployment using the saved manifest. - The problem with this approach was that an outside system managing known deployments could clobber the Telepresence deployment, causing the user's Telepresence session to crash mysteriously. - - The new method creates a separate deployment for Telepresence with the same labels as the original deployment and scales down the original deployment to zero replicas. - Services will find the new deployment the same way they found the original, via label selectors. - To clean up, it deletes the Telepresence deployment and scales the original deployment back to its previous replica count. - - An outside system managing known deployments should not touch the Telepresence deployment; it may scale up the original and steal requests from the Telepresence session, but at least that session won't crash mysteriously as it would before. 
- ([#575](https://github.com/telepresenceio/telepresence/issues/575)) - -#### 0.84 (April 20, 2018) - -Bug fixes: - -* This release fixes startup checks for cluster access to avoid crashing or quitting unnecessarily when sufficient access is available. - -#### 0.83 (April 16, 2018) - -Misc: - -* Telepresence requires fewer cluster permissions than before. - The required permissions are now [documented](../connecting#cluster-permissions) for Kubernetes. - ([#568](https://github.com/telepresenceio/telepresence/issues/568)) - -#### 0.82 (April 11, 2018) - -Bug fixes: - -* When using the vpn-tcp method, DNS queries from the domain search path no longer yield NXDOMAIN. - Unfortunately, the expected follow-up query does not occur under some network conditions. - This change fixes a DNS regression introduced in 0.79. - ([#578](https://github.com/telepresenceio/telepresence/issues/578)) - -#### 0.81 (April 6, 2018) - -Bug fixes: - -* This release corrects a race condition in subprocess output capture when using the `--verbose` option. - -#### 0.79 (April 6, 2018) - -Bug fixes: - -* Telepresence now supports IPv4 reverse lookups when using `--method=inject-tcp`. - ([#195](https://github.com/telepresenceio/telepresence/issues/195)) -* No more crash when Telepresence cannot write to its log file. - ([#459](https://github.com/telepresenceio/telepresence/issues/459)) -* Fixed remaining instances of logfile content that was not time and origin stamped. - As a side-effect, the `stamp-telepresence` command has been removed. - ([#390](https://github.com/telepresenceio/telepresence/issues/390)) - -Misc: - -* The commands that Telepresence launches have always been recorded in the logfile. - Now they are formatted so they can be copy-pasted into your terminal in most cases. -* The beginning of the logfile contains more information about your local and cluster setup to aid with bug reports and troubleshooting. -* The crash reporter does a better job of capturing relevant information and tries to avoid missing the end of the logfile. - ([#446](https://github.com/telepresenceio/telepresence/issues/446)) - ([#466](https://github.com/telepresenceio/telepresence/issues/466)) -* When using the vpn-tcp method, DNS queries from the domain search path (the search list in `/etc/resolv.conf`) now yield NXDOMAIN instead of implicitly stripping off the search suffix. - The resolver library will eventually query for the bare name (without the search suffix), at which point Telepresence DNS will return the expected IP address in the cluster. - ([#192](https://github.com/telepresenceio/telepresence/issues/192)) - -#### 0.78 (March 29, 2018) - -Features: - -* Telepresence starts roughly five seconds faster on every invocation thanks to some basic caching of cluster information. - The cache is stored in `~/.cache/telepresence` and is cleared automatically after twelve hours. - Delete the contents of that directory to clear the cache manually. - -Bug fixes: - -* When using the container method, Telepresence waits longer for networking to start before giving up. - This may help users who sometimes experience higher latency between their local network and their Kubernetes cluster. - ([#340](https://github.com/telepresenceio/telepresence/issues/340)) - ([#539](https://github.com/telepresenceio/telepresence/issues/539)) - -#### 0.77 (March 26, 2018) - -Misc: - -* Updates to the release process. 
- -#### 0.76 (March 25, 2018) - -Features: - -* Added the ability to specify `--init=false` flag when using `--docker-run` - Thanks to GitHub user CMajeri for the patch. - ([#481](https://github.com/telepresenceio/telepresence/issues/481)) - -Bug fixes: - -* Telepresence now makes a greater effort to account for local DNS search domain configuration when bridging DNS to Kubernetes when using `--method=vpn-tcp`. - ([#393](https://github.com/telepresenceio/telepresence/issues/393)) -* Telepresence should no longer get confused looking for the route to the host when using the container method. - ([#532](https://github.com/telepresenceio/telepresence/issues/532)) - -Misc: - -* A new end-to-end test suite setup will help us reduce the cycle time associated with testing Telepresence. We added documentation introducing developers to Telepresence end-to-end test development. - ([#400](https://github.com/telepresenceio/telepresence/issues/400)) -* Improved cleanup of our testing cluster used by CI. -* Reduced the verbosity of DNS lookup failures in the logs. - ([#497](https://github.com/telepresenceio/telepresence/issues/497)) - -#### 0.75 (January 30, 2018) - -Bug fixes: - -* Telepresence correctly handles the `--publish` (`-p`) Docker option by incorporating it into the `docker` invocation that sets up networking. - ([#387](https://github.com/telepresenceio/telepresence/issues/387)) - -Misc: - -* The end of startup and the beginning of shutdown are now both clearly indicated in `telepresence.log`. -* Environment and testing setup is no longer entangled with TravisCI setup. - The `environment-setup.sh` and `build` scripts are used by continuous integration and can be used by developers as well. - ([#374](https://github.com/telepresenceio/telepresence/issues/374)) -* Continuous integration operations, specifically testing, have been moved to CircleCI. - The release process remains on TravisCI, at least for this release. - ([#397](https://github.com/telepresenceio/telepresence/issues/397)) - ([#417](https://github.com/telepresenceio/telepresence/issues/417)) - -#### 0.73 (December 28, 2017) - -Features: - -* The `--also-proxy` feature supports specifying IP ranges (in [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation)) in addition to hostnames and individual IPs. - ([#375](https://github.com/telepresenceio/telepresence/issues/375)) - -Misc: - -* Telepresence source code is no longer one giant Python file. - This will allow for quicker development going forward. - ([#377](https://github.com/telepresenceio/telepresence/pull/377)) -* Telepresence source code conforms to `yapf` formatting. - The lint stage of the CI pipeline enforces this. - ([#368](https://github.com/telepresenceio/telepresence/issues/368)) - -#### 0.72 (December 12, 2017) - -Misc: - -* The Telepresence source tree is organized more like a typical Python project. - This will allow for quicker development going forward. - ([#344](https://github.com/telepresenceio/telepresence/pull/344)) -* Telepresence has native packages for Ubuntu Xenial, Zesty, and Artful, and for Fedora 26 and 27. - ([#269](https://github.com/telepresenceio/telepresence/issues/269)) -* An install script is included for installing Telepresence from source. - ([#347](https://github.com/telepresenceio/telepresence/issues/347)) - -#### 0.71 (November 1, 2017) - -Bug fixes: - -* Telepresence no longer crashes on deployments containing Services of type ExternalName. - Thanks to Niko van Meurs for the patch. 
- ([#324](https://github.com/telepresenceio/telepresence/pull/324), - [#329](https://github.com/telepresenceio/telepresence/pull/329)) - -Misc: - -* The [anonymous usage information](../usage_reporting) reported by Telepresence now includes the operation (e.g., "swap-deployment") and method (e.g., "vpn-tcp") used. - This will help us focus development resources. -* Telepresence is no longer packaged for Ubuntu 16.10 (Yakkety Yak) as that release has [reached end of life](http://fridge.ubuntu.com/2017/07/20/ubuntu-16-10-yakkety-yak-end-of-life-reached-on-july-20-2017/). - -#### 0.68 (October 12, 2017) - -Bug fixes: - -* Telepresence no longer crashes when the deployment has multi-line environment variables. - ([#301](https://github.com/telepresenceio/telepresence/issues/301)) -* Telepresence now sets a CPU limit on its Kubernetes pods. - ([#287](https://github.com/telepresenceio/telepresence/issues/287)) -* Deployments that do not use the default service account (and thus don't automatically have access to service account credentials for the k8s API) are now supported. - Thanks to Dino Hensen for the patch. - ([#313](https://github.com/telepresenceio/telepresence/pull/313), - [#314](https://github.com/telepresenceio/telepresence/pull/314)) - -Misc - -* [Telepresence documentation](../../) uses GitBook. - -#### 0.67 (September 21, 2017) - -Bug fixes: - -* The macOS Homebrew installation no longer assumes that you have Homebrew installed in the default location (`/usr/local`). It also no longer requires `virtualenv` to be installed. - -Misc: - -* The Telepresence logfile now has time and source stamps for almost every line. This will help us diagnose problems going forward. -* Clarified which support binaries are being looked for and where on startup. -* The website now has a [community page](https://www.telepresence.io/community). -* Cleaned up some links (HTTP vs HTTPS, avoid redirection). - -#### 0.65 (August 29, 2017) - -Bug fixes: - -* Avoid a dependency conflict in the macOS Homebrew installation by dropping the required dependency on `socat`. You will still need to install `socat` if you want to use `--method container`, but installing it separately from Telepresence appears to work fine. Thanks to Dylan Scott for chasing this down. - ([#275](https://github.com/telepresenceio/telepresence/issues/275)) - -#### 0.64 (August 23, 2017) - -Bug fixes: - -* Allow `make build-k8s-proxy-minikube` to work on macOS. Same for Minishift. -* Allow `--logfile /dev/null` - -Misc: - -* Documented macOS limitations with `--method inject-tcp` due to System Integrity Protection. Thanks to Dylan Scott for the detailed write-up. - ([#268](https://github.com/telepresenceio/telepresence/issues/268)) -* The [website](https://www.telepresence.io/) has TLS enabled -* Telepresence [reports anonymous usage information](../usage_reporting) during startup - -#### 0.63 (July 31, 2017) - -Bug fixes: - -* Fixed regression in `--swap-deployment` where it would add a proxy container instead of replacing the existing one. - ([#253](https://github.com/telepresenceio/telepresence/issues/253)) - -#### 0.62 (July 26, 2017) - -Bug fixes: - -* Support for Linux distributions using systemd-resolved, like Ubuntu 17.04 and Arch, now works when there is no search domain set. - Thanks to Vladimir Pouzanov for the bug report, testing, and useful suggestions. - ([#242](https://github.com/telepresenceio/telepresence/issues/242)) -* Better method for bypassing DNS caching on startup, which should be more robust. 
-* Instead of hardcoding /16, Telepresence now uses a better heuristic for guessing the IP range for Services.
-  Thanks to Vladimir Pouzanov for the bug report.
-  ([#243](https://github.com/telepresenceio/telepresence/issues/243))
-* SIGHUP now cleans up resources the same way SIGTERM and hitting Ctrl-C do.
-  ([#184](https://github.com/telepresenceio/telepresence/issues/184))
-
-#### 0.61 (July 19, 2017)
-
-Bug fixes:
-
-* Environment variables created using ConfigMaps and Secrets (using `envFrom`) are now made available to the local process.
-  Thanks to Tristan Pemble for the bug report.
-  ([#230](https://github.com/telepresenceio/telepresence/issues/230))
-
-#### 0.60 (July 18, 2017)
-
-Features:
-
-* When using `--swap-deployment`, ports listed in the existing Deployment are automatically forwarded.
-  Thanks to Phil Lombardi and Rafi Schloming for the feature request.
-  ([#185](https://github.com/telepresenceio/telepresence/issues/185))
-
-Misc:
-
-* Switched to upstream `sshuttle` instead of using a forked version.
-
-#### 0.59 (July 18, 2017)
-
-Bug fixes:
-
-* When using `--swap-deployment`, many more container options that would break `telepresence` are swapped out.
-  Thanks to Jonathan Wickens for the bug report.
-  ([#226](https://github.com/telepresenceio/telepresence/issues/226))
-
-#### 0.58 (July 13, 2017)
-
-Bug fixes:
-
-* Fixed regression that broke Docker on OS X.
-  Thanks to Vincent van der Weele for the bug report.
-  ([#221](https://github.com/telepresenceio/telepresence/issues/221))
-
-#### 0.57 (July 6, 2017)
-
-Bug fixes:
-
-* Fix DNS lookups on macOS in `vpn-tcp` mode.
-  Thanks to number101010 for the bug report.
-  ([#216](https://github.com/telepresenceio/telepresence/issues/216))
-
-#### 0.56 (July 5, 2017)
-
-Features:
-
-* `--help` now includes some examples.
-  ([#189](https://github.com/telepresenceio/telepresence/issues/189))
-
-Bug fixes:
-
-* `--docker-run` container no longer gets environment variables from the host, only from the remote pod.
-  ([#214](https://github.com/telepresenceio/telepresence/issues/214))
-
-#### 0.55 (June 30, 2017)
-
-Features:
-
-* `--method` is now optional, defaulting to "vpn-tcp", or "container" when `--docker-run` is used.
-  ([#206](https://github.com/telepresenceio/telepresence/issues/206))
-* If no deployment method (`--new-deployment`, `--swap-deployment` or `--deployment`) is specified, then `--new-deployment` is used by default with a randomly generated name.
-  ([#170](https://github.com/telepresenceio/telepresence/issues/170))
-
-#### 0.54 (June 28, 2017)
-
-Features:
-
-* `--method vpn-tcp` now works on minikube and minishift.
-  As a result we now recommend using it as the default method.
-  ([#160](https://github.com/telepresenceio/telepresence/issues/160))
-
-Bug fixes:
-
-* Support more versions of Linux in container mode.
-  Thanks to Henri Koski for the bug report and patch.
-  ([#202](https://github.com/telepresenceio/telepresence/issues/202))
-
-#### 0.53 (June 27, 2017)
-
-Features:
-
-* `--expose` can now expose a different local port than the one used on the cluster side.
-  ([#180](https://github.com/telepresenceio/telepresence/issues/180))
-
-Bug fixes:
-
-* Fix regression where exposing ports <1024 stopped working.
-  ([#194](https://github.com/telepresenceio/telepresence/issues/194))
-* Fix regression where tools like `ping` weren't hidden on Mac in `inject-tcp` method.
- ([#187](https://github.com/telepresenceio/telepresence/issues/187)) - -#### 0.52 (June 21, 2017) - -Features: - -* Telepresence can now be used to proxy Docker containers, by using `--method container` together with `--docker-run`. - Thanks to Iván Montes for the feature request and initial testing. - ([#175](https://github.com/telepresenceio/telepresence/issues/175)) - -#### 0.51 (June 13, 2017) - -Bug fixes: - -* Default `ssh` config is not used, in case it has options that break Telepresence. - Thanks to KUOKA Yusuke for the bug report, and Iván Montes for debugging and the patch to fix it. - ([#174](https://github.com/telepresenceio/telepresence/issues/174)) - -#### 0.50 (June 8, 2017) - -Bug fixes: - -* If no `current-context` is set in the Kubernetes config, then give a nice - error message indicating the need for passing `--context` option to - `telepresence`. - Thanks to Brandon Philips for the bug report. - ([#164](https://github.com/telepresenceio/telepresence/issues/164)) -* `oc` will not be used unless we're sure we're talking to an OpenShift server. This is useful for Kubernetes users who happen to have a `oc` binary that isn't the OpenShift client. - Thanks to Brandon Philips for the bug report. - ([#165](https://github.com/telepresenceio/telepresence/issues/165)) - -#### 0.49 (June 7, 2017) - -Features: - -* **Backwards incompatible change:** Telepresence now supports a alternative to `LD_PRELOAD`, a VPN-like connection using [sshuttle](http://sshuttle.readthedocs.io/en/stable/). As a result the `telepresence` command line now has an extra required argument `--method`. - ([#128](https://github.com/telepresenceio/telepresence/issues/128)) -* Added shortcuts for a number of the command line arguments. - -#### 0.48 (May 25, 2017) - -Bug fixes: - -* `--swap-deployment` now works in more cases on OpenShift, in particular when `oc new-app` was used. - -#### 0.47 (May 23, 2017) - -Features: - -* `--swap-deployment` allows replacing an existing Deployment with Telepresence, and then swapping back on exiting the `telepresence` command line. - ([#9](https://github.com/telepresenceio/telepresence/issues/9)) - -#### 0.46 (May 16, 2017) - -Features: - -* Preliminary support for OpenShift Origin. - Thanks to Eli Young for lots of help figuring out the necessary steps. - ([#132](https://github.com/telepresenceio/telepresence/issues/132)) - -Bug fixes: - -* Pods created with `--new-deployment` are now looked up using a unique ID, preventing issues where a pod from a previous run was mistakenly used. - ([#94](https://github.com/telepresenceio/telepresence/issues/94)) - -#### 0.45 (May 8, 2017) - -Bug fixes: - -* The Kubernetes-side container used by Telepresence no longer runs as root. - This will make support for OpenShift Origin easier, as well as other environments that don't want containers running as root. - Thanks to Eli Young for the patch. -* Increased connection timeout from 3 seconds to 10 seconds, in the hopes of reducing spurious disconnects. - ([#88](https://github.com/telepresenceio/telepresence/issues/88)) -* Common commands that won't work under Telepresence, like `ping` and `nslookup`, will now fail with an appropriate error messages. - ([#139](https://github.com/telepresenceio/telepresence/issues/139)) - -#### 0.44 (May 4, 2017) - -Bug fixes: - -* `telepresence` fails with a better error if a too-old version of Python is used. - Thanks to Victor Gdalevich for the bug report. 
- ([#136](https://github.com/telepresenceio/telepresence/issues/136)) -* `telepresence` automatic bug reporting code is triggered by errors during parsing command line arguments. -* If namespace was set using `kubectl config set-context` it will no longer cause Telepresence to break. - Thanks to spiddy for the bug report. - ([#133](https://github.com/telepresenceio/telepresence/issues/133)) - -#### 0.43 (May 3, 2017) - -Features: - -* `--run` lets you run a command directly as an alternative to running a shell, e.g. `telepresence --new-deployment test --run python3 myapp.py`. -* `telepresence` starts up much faster by polling more frequently and reducing unnecessary sleeps. - -#### 0.42 (April 28, 2017) - -Bug fixes: - -* `~/.bashrc` is no longer loaded by the Telepresence shell, to ensure it doesn't break when e.g. `kubectl` is run there. Thanks to discopalevo for the bug report. - ([#126](https://github.com/telepresenceio/telepresence/issues/126)) -* Log files are written to original path, not wherever you happen to `cd` to. - ([#120](https://github.com/telepresenceio/telepresence/issues/120)) -* Better error messages when a custom Deployment is missing or misconfigured. - ([#121](https://github.com/telepresenceio/telepresence/issues/121)) - -#### 0.41 (April 26, 2017) - -Features: - -* Telepresence can run on Windows using the Windows Subsystem for Linux. - -Bug fixes: - -* Telepresence now sets a RAM limit on its Kubernetes pods. -* Telepresence Kubernetes pod exits faster. - -Releases 0.31 to 0.40 were spent debugging release automation. - -#### 0.30 (April 19, 2017) - -Features: - -* Telepresence can now be installed via Homebrew on OS X. - -#### 0.29 (April 13, 2017) - -Bug fixes: - -* Fix surprising error about `umount` when shutting down on Linux. - -#### 0.28 (April 13, 2017) - -Features: - -* Remote volumes are now accessible by the local process. - ([#78](https://github.com/telepresenceio/telepresence/issues/78)) - -#### 0.27 (April 12, 2017) - -Features: - -* `--context` option allows choosing a `kubectl` context. - Thanks to Svend Sorenson for the patch. - ([#3](https://github.com/telepresenceio/telepresence/issues/3)) - -Bug fixes: - -* Telepresence no longer breaks if compression is enabled in `~/.ssh/config`. - Thanks to Svend Sorenson for the bug report. - ([#97](https://github.com/telepresenceio/telepresence/issues/97)) - -#### 0.26 (April 6, 2017) - -Backwards incompatible changes: - -* New requirements: openssh client and Python 3 must be installed for Telepresence to work. - Docker is no longer required. - -Features: - -* Docker is no longer required to run Telepresence. - ([#78](https://github.com/telepresenceio/telepresence/issues/78)) -* Local servers just have to listen on localhost (127.0.0.1) in order to be accessible to Kubernetes; previously they had to listen on all interfaces. - ([#77](https://github.com/telepresenceio/telepresence/issues/77)) - -0.25 failed the release process due to some sort of mysterious mistake. - -#### 0.24 (April 5, 2017) - -Bug fixes: - -* The `KUBECONFIG` environment variable will now be respected, so long as it points at a path inside your home directory. - ([#84](https://github.com/telepresenceio/telepresence/issues/84)) -* Errors on startup are noticed, fixing issues with hanging indefinitely in the "Starting proxy..." phase. - ([#83](https://github.com/telepresenceio/telepresence/issues/83)) - -#### 0.23 (April 3, 2017) - -Bug fixes: - -* Telepresence no longer uses lots of CPU busy-looping. 
- Thanks to Jean-Paul Calderone for the bug report. - -#### 0.22 (March 30, 2017) - -Features: - -* Telepresence can now interact with any Kubernetes namespace, not just the default one. - ([#74](https://github.com/telepresenceio/telepresence/issues/74)) - -Backwards incompatible changes: - -* Running Docker containers locally (`--docker-run`) is no longer supported. - This feature will be reintroduced in the future, with a different implementation, if there is user interest. - [Add comments here](https://github.com/telepresenceio/telepresence/issues/76) if you're interested. - -#### 0.21 (March 28, 2017) - -Bug fixes: - -* Telepresence exits when connection is lost to the Kubernetes cluster, rather than hanging. -* Telepresence notices when the proxy container exits and shuts down. - ([#24](https://github.com/telepresenceio/telepresence/issues/24)) - -#### 0.20 (March 27, 2017) - -Bug fixes: - -* Telepresence only copies environment variables explicitly configured in the `Deployment`, rather than copying all environment variables. -* If there is more than one container Telepresence copies the environment variables from the one running the `datawire/telepresence-k8s` image, rather than the first one. - ([#38](https://github.com/telepresenceio/telepresence/issues/38)) - -#### 0.19 (March 24, 2017) - -Bug fixes: - -* Fixed another issue with `--run-shell` on OS X. - -#### 0.18 (March 24, 2017) - -Features: - -* Support `--run-shell` on OS X, allowing local processes to be proxied. -* Kubernetes-side Docker image is now smaller. - ([#61](https://github.com/telepresenceio/telepresence/issues/61)) - -Bug fixes: - -* When using `--run-shell`, allow access to the local host. - Thanks to Jean-Paul Calderone for the bug report. - ([#58](https://github.com/telepresenceio/telepresence/issues/58)) - -#### 0.17 (March 21, 2017) - -Bug fixes: - -* Fix problem with tmux and wrapping when using `--run-shell`. - Thanks to Jean-Paul Calderone for the bug report. - ([#51](https://github.com/telepresenceio/telepresence/issues/51)) -* Fix problem with non-login shells, e.g. with gnome-terminal. - Thanks to Jean-Paul Calderone for the bug report. - ([#52](https://github.com/telepresenceio/telepresence/issues/52)) -* Use the Deployment's namespace, not the Deployment's spec namespace since that may not have a namespace set. - Thanks to Jean-Paul Calderone for the patch. -* Hide torsocks messages. - Thanks to Jean-Paul Calderone for the bug report. - ([#50](https://github.com/telepresenceio/telepresence/issues/50)) - -#### 0.16 (March 20, 2017) - -Bug fixes: - -* Disable `--run-shell` on OS X, hopefully temporarily, since it has issues with System Integrity Protection. -* Fix Python 3 support for running `telepresence`. - -#### 0.14 (March 20, 2017) - -Features: - -* Added `--run-shell`, which allows proxying against local processes. - ([#1](https://github.com/telepresenceio/telepresence/issues/1)) - -#### 0.13 (March 16, 2017) - -Bug fixes: - -* Increase time out for pods to start up; sometimes it takes more than 30 seconds due to time to download image. - -#### 0.12 (March 16, 2017) - -Bug fixes: - -* Better way to find matching pod for a Deployment. - ([#43](https://github.com/telepresenceio/telepresence/issues/43)) - -#### 0.11 (March 16, 2017) - -Bug fixes: - -* Fixed race condition that impacted `--expose`. - ([#40](https://github.com/telepresenceio/telepresence/issues/40)) - -#### 0.10 (March 15, 2017) - -Bug fixes: - -* Fixed race condition the first time Telepresence is run against a cluster. 
- ([#33](https://github.com/telepresenceio/telepresence/issues/33)) - -#### 0.9 (March 15, 2017) - -Features: - -* Telepresence now detects unsupported Docker configurations and complain. - ([#26](https://github.com/telepresenceio/telepresence/issues/26)) -* Better logging from Docker processes, for easier debugging. - ([#29](https://github.com/telepresenceio/telepresence/issues/29)) - -Bug fixes: - -* Fix problem on OS X where Telepresence failed to work due to inability to share default location of temporary files. - ([#25](https://github.com/telepresenceio/telepresence/issues/25)) - -#### 0.8 (March 14, 2017) - -Features: - -* Basic logging of what Telepresence is doing, for easier debugging. -* Check for Kubernetes and Docker on startup, so problems are caught earlier. -* Better error reporting on crashes. ([#19](https://github.com/telepresenceio/telepresence/issues/19)) - -Bug fixes: - -* Fixed bug where combination of `--rm` and `--detach` broke Telepresence on versions of Docker older than 1.13. Thanks to Jean-Paul Calderone for reporting the problem. ([#18](https://github.com/telepresenceio/telepresence/issues/18)) diff --git a/docs/v1/reference/connecting.md b/docs/v1/reference/connecting.md deleted file mode 100644 index 82ed846e..00000000 --- a/docs/v1/reference/connecting.md +++ /dev/null @@ -1,146 +0,0 @@ -# Connecting to the cluster - -## Setting up the proxy - -To use Telepresence with a cluster (Kubernetes or OpenShift, local or remote) you need to run a proxy inside the cluster. -There are three ways of doing so. - -### Creating a new deployment - -By using the `--new-deployment` option `telepresence` can create a new deployment for you. -It will be deleted when the local `telepresence` process exits. -This is the default if no deployment option is specified. - -For example, this creates a `Deployment` called `myserver`: - -```shell -telepresence --new-deployment myserver --run-shell -``` - -This will create two Kubernetes objects, a `Deployment` and a `Service`, both named `myserver`. -(On OpenShift a `DeploymentConfig` will be used instead of `Deployment`.) -Or, if you don't care what your new `Deployment` is called, you can do: - -```shell -telepresence --run-shell -``` - -If `telepresence` crashes badly enough (e.g. you used `kill -9`) you will need to manually delete the `Deployment` and `Service` that Telepresence created. - -### Swapping out an existing deployment - -If you already have your code running in the cluster you can use the `--swap-deployment` option to replace the existing deployment with the Telepresence proxy. -When the `telepresence` process exits it restores the earlier state of the `Deployment` (or `DeploymentConfig` on OpenShift). - -```shell -telepresence --swap-deployment myserver --run-shell -``` - -If you have more than one container in the pods created by the deployment you can also specify the container name: - -```shell -telepresence --swap-deployment myserver:containername --run-shell -``` - -If `telepresence` crashes badly enough (e.g. you used `kill -9`) you will need to manually restore the `Deployment`. - - -### Running Telepresence manually - -You can also choose to run the Telepresence manually by starting a `Deployment` that runs the proxy in a pod. - -The `Deployment` should only have 1 replica, and use the Telepresence different image: - -
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  name: myservice
-spec:
-  replicas: 1  # only one replica
-  template:
-    metadata:
-      labels:
-        name: myservice
-    spec:
-      containers:
-      - name: myservice
-        image: datawire/telepresence-k8s:$version$  # new image
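-        # $version$ is a placeholder for the concrete Telepresence release you are running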
-
- -You should apply this file to your cluster: - -```shell -kubectl apply -f telepresence-deployment.yaml -``` - -Next, you need to run the local Telepresence client on your machine, using `--deployment` to indicate the name of the `Deployment` object whose pod is running `telepresence/datawire-k8s`: - -```shell -telepresence --deployment myservice --run-shell -``` - -Telepresence will leave the deployment untouched when it exits. - - -## Kubernetes contexts and namespaces - -### kubectl context - -By default Telepresence uses whatever the current context is for `kubectl`. -If you want to choose a specific context you can use the `--context` option to `telepresence`. -For example: - -```shell -telepresence --context minikube --run-shell -``` - -You can choose any context listed in `kubectl config get-contexts`. - -If you've [set a namespace for the context](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#setting-the-namespace-preference) then that namespace will be used to find/create the `Deployment`, but you can also choose a namespace explicitly, as shown in the next section. - -### Kubernetes namespaces - -If you want to proxy to a Deployment in a non-default namespace you can pass the `--namespace` argument to Telepresence: - -```shell -telepresence --namespace yournamespace --swap-deployment yourservice --run-shell -``` - - -## Cluster permissions - -Telepresence uses `kubectl` or `oc` to manipulate your Kubernetes/OpenShift cluster. -This means the user who invokes Telepresence needs the appropriate authorization. For Kubernetes, the following Role should be sufficient: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: telepresence-role - namespace: restricted-ns -rules: -- apiGroups: [""] - resources: ["services"] - verbs: ["list", "create", "delete"] -- apiGroups: ["", "apps", "extensions"] - resources: ["deployments"] - verbs: ["list", "create", "get", "update", "delete"] -- apiGroups: ["", "apps", "extensions"] - resources: ["deployments/scale"] - verbs: ["get", "update"] -- apiGroups: ["", "apps", "extensions"] - resources: ["replicasets"] - verbs: ["list", "get", "update", "delete"] -- apiGroups: [""] - resources: ["pods"] - verbs: ["list", "get"] -- apiGroups: [""] - resources: ["pods/log"] - verbs: ["get"] -- apiGroups: [""] - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: [""] - resources: ["pods/exec"] - verbs: ["create"] -``` diff --git a/docs/v1/reference/developing.md b/docs/v1/reference/developing.md deleted file mode 100644 index 10d67e84..00000000 --- a/docs/v1/reference/developing.md +++ /dev/null @@ -1,361 +0,0 @@ -# Development Guide - -### Known issues - -* Docs get updated off of master, so documentation on site may reference unreleased features if you're not careful. - Until that's fixed, releases should happen as soon as new feature is merged to master. - See [issue #4](https://github.com/telepresenceio/telepresence/issues/4). - -### Setting up a development environment - -The following instructions will gets the Telepresence source and sets up some of its dependencies (torsocks, gcloud). -It also creates a virtualenv and installs Telepresence's Python dependencies into it. -The arguments required for `environment-setup.sh` are Google Cloud configuration items which identify a GKE cluster which can be used for testing, plus the operating system. 
- - -```console -$ git clone git@github.com:telepresenceio/telepresence.git -$ cd telepresence -$ ./environment-setup.sh $PROJECT $CLUSTER $ZONE -$ make virtualenv -``` - -You may want to activate the virtualenv (for the duration of your shell): - -```console -$ . virtualenv/bin/activate -``` - -This will give you access to the Telepresence executables: - -* `telepresence` -* `sshuttle-telepresence`. - -You can test your modifications to Telepresence with the `build` tool: - -```console -$ make check TELEPRESENCE_REGISTRY= [PYTEST_ARGS=] -``` - -You can run a subset of the tests using the pytest features for selecting tests -(for example, `-k` and `-m`). -End-to-end tests are marked with the method and operation they exercise. -So, for example, you can run all of the tests in the vpn-tcp, swap-deployment configuration: - -```console -$ make check TELEPRESENCE_REGISTRY= PYTEST_ARGS="-m 'vpn_tcp and swap_deployment'" -``` - -Note that `-` must be replaced with `_` due to pytest limitations. - -See `make help` for details about how to run specific tests. - -You can also build images and push them to a registry without running any tests: - -```console -$ make docker-push TELEPRESENCE_REGISTRY= -``` - -If you want to push images to the local registry, start the container first: - -```console -$ docker run -d -p 5000:5000 --restart=always --name docker-local-registry registry -``` - -and then simply use `TELEPRESENCE_REGISTRY=localhost:5000`. - -Or if you want to build images using minikube (untested): - -```console -$ eval $(minikube docker-env --shell bash) -$ make docker-push TELEPRESENCE_REGISTRY= -``` - -Or using minishift (untested): - -```console -$ eval $(minishift docker-env --shell bash) -$ make docker-push TELEPRESENCE_REGISTRY= -``` - -### End-to-End Testing - -The Telepresence test suite includes a set of tests which run Telepresence as a real user would. -These tests launch Telepresence and have it communicate with a real Kubernetes cluster, running real pods and observing the results. -These tests are implemented in `tests/test_endtoend.py` and `tests/test_endtoend_distinct.py`. - -While the test functions themselves are present in these test modules, -there are several additional support modules also involved. -`tests/probe_endtoend.py` is a Python program which the tests tell Telepresence to run. -`tests/parameterize_utils.py` is a support module for writing tests. -`tests/conftest.py` integrates the tests with pytest. -At points during end-to-end test development you may find yourself working with any of these sources. - -With the aim of making it clear how to write your own end-to-end test, here is one dissected. - -#### Test Probe - -```python -from .conftest import ( - with_probe, -) - -@with_probe -def test_demonstration(probe): -``` - -The end-to-end tests are written using a number of pytest features. -The first is parameterized fixtures to make it easy to apply a test to all Telepresence execution modes. - -Notice that the test function is defined to take an argument `probe`. -The argument must be named `probe` to select the correct pytest fixture. -The `with_probe` decorator parameterizes the `probe` fixture with all of the Telepresence execution modes. -This means that pytest will call this test function many times with different values for `probe`. -For example, the test function will be called with a probe associated with a run of Telepresence given the `--method=container --new-deployment` arguments. 
-The test function is called once for each combination of method arguments and operations (`--new-deployment`, `--swap-deployment`, etc). - - -#### Probe Result - -```python - probe_environment = probe.result().result["environ"] -``` - -`probe.result()` will be used in every end-to-end test. -This method returns an object - a `ProbeResult` - representing the Telepresence run. -This *may* initiate a new run of Telepresence but it may also re-use the Telepresence launched by an earlier test with the same configuration (the same `probe`). -This is the result of pytest fixture optimization and it allows the test suite to run Telepresence far fewer times than would otherwise be required (reducing the overall runtime of the test suite). - -The Telepresence probe collects some information from the Telepresence execution context immediately upon starting. -The `result` attribute of the `ProbeResult` provides access to this information. -In this case, we retrieve the complete POSIX environment for inspection. - -#### Probe Operation - -```python - if probe.operation.inherits_deployment_environment(): -``` - -This test now prepares to make its first assertion. -This first assertion is guarded by a check against the result of a method of `probe.operation`. -`probe.operation` is a reference to an object representing the operation which the probe used. -Remember that a test decorated with `with_probe` will be run multiple times with different `probe` arguments. -Many of those probes will be configured with a different operation. -This attribute lets a test vary its behavior based on that operation. -This is useful because different operations may have different desired behavior and require different assertions in their tests. -For more details about what can be done with the operation object, see `tests/parameterize_utils.py` where operations are implemented. - -In this case, `inherits_deployment_environment` is a method of the operation which returns a boolean. -The result indicates whether it is expired and desired that the Telepresence execution context's POSIX environment inherits environment variables that were set in a pre-existing Kubernetes Deployment resource. -Not all Telepresence configurations interact with a pre-existing Deployment - hence the need for this check. - -Supposing we are running with a probe where this check succeeds: - -```python - desired = probe.DESIRED_ENVIRONMENT - expected = { - k: probe_environment.get(k, None) - for k in probe.DESIRED_ENVIRONMENT - } - assert desired == expected -``` - -Here the test makes an assertion about the observed POSIX environment. -It looks up the value of the environment which *ought* to have been inherited - `probe.DESIRED_ENVIRONMENT` - and makes sure the items all appear in the observed environment. - -For the `else` case of this branch, we might assert that `desired` does *not* appear in the observed environment. - -#### Probe Method - -Test can also inspect the method in use. - -```python - if probe.method.inherits_client_environment(): - assert probe.CLIENT_ENV_VAR in probe_environment -``` - -The idea here is similar. -Different behavior may be desired from different methods. -Inspection of `probe.method` provides a way to vary the test behavior based on this. -Methods are implemented in `tests/parameterize_utils.py`. - -#### Probe Interaction - -The Telepresence process associated with the probe continues to run while the tests run. -This means interactions with it are possible. -Simple messages can easily be exchanged with the probe. 
- -```python - probe_result.write("probe-also-proxy {}".format(hostname)) - success, request_ip = loads(probe_result.read()) -``` - -This uses a command supported by the probe which makes it issue an HTTP request to a particular URL and return the response. -These commands are implemented in `probe_endtoend.py`. -In this case, the result is a two-tuple. -The first element indicates whether the HTTP request succeeded or not. -The second element gives some data from the HTTP response (if it succeeded). - -Probe commands are useful for observing any state or behavior which is only visible in the Telepresence execution context. -They allow the test to retrieve the information so assertions can be made. - -#### Final Thoughts - -When writing end-to-end tests keep a few things in mind: - -##### Shared Telepresence - -The `probe` fixture re-uses `Probe` instances. -Tests should not modify the `Probe` passed in for the `probe` argument. -Doing so will invalidate the results of subsequent tests. - -Likewise, the `Probe` instance has an associated `Telepresence` process. -Tests should not modify that process, either, or subsequent tests will be invalidated. -This should be fairly easy since there's not _much_ that can be done to "modify" a running Telepresence process. -One very obvious example, though, is that the process can be killed. -Don't do that. - -##### End-to-end Debugging - -When such a test fails, -the *default* is for it to present a low-information failure in the test suite result. -This may be the test suite hanging and being killed by a pytest timeout. -Or it may be Telepresence crashing and the full Telepresence log being dumped. -These kind of test failures are challenging to debug. -Be sure to examine all of the information available. -If not enough information is available, add logging to Telepresence or the test suite. -*Do* write your tests first, observe them fail, and improve their failure behavior before making them pass. - -##### Unit Tests - -End-to-end tests provide a highly realistic model of real-world Telepresence behavior. -However, they are not the only option and not always the best option. -For subtle logic (particularly involving many possible outcomes), unit tests may provide a lower-cost option. -A single end-to-end test to verify a gross code path combined with many unit tests to exercise all of the subtleties can provide the best of both worlds. - -### Coding standard - -Formatting is enforced by the installed `yapf` tool; to reformat the code, you can do: - -```console -$ make format -``` - -### Releasing Telepresence - -#### Overview - -Every commit to the master branch results in CI building a set of deployable artifacts: Docker images, Linux packages, a JSON blob for [Scout](../usage_reporting), and a markdown blob for announcing a release on Slack et al. -The artifacts are available for download as a tarball `telepresence-dist.tbz` from the CircleCI artifacts tab on the `deploy` job page. -The release process pushes a set of those artifacts into production. - -At the moment, the Linux packages are not tested, other than a minor smoke test. Package repositories are [not tested](https://github.com/telepresenceio/telepresence/issues/109) at all. - -#### Theory of operation - -0. Recreate your Python virtual environment from scratch and re-run the linters. - This avoids the frustration of having your release fail in the lint stage in CI, which rebuilds its virtualenv every time. - `rm -r virtualenv && make lint` -1. 
Make sure `docs/reference/changelog.md` has changelog entries for the next release, and today's release date. - If changelog entries are in the `newsfragments` directory, use [towncrier](https://pypi.org/project/towncrier/) to construct the changelog update. - towncrier's version management is incompatible with the rest of the universe; specify the new version explicitly. - Make sure to commit your changes. - `virtualenv/bin/towncrier --version 0.xx` - `# Edit the change log` - `git add docs/reference/changelog.md` - `git commit -m "Prep changelog for release 0.xx"` -2. Mark the new version number for Telepresence by tagging in Git. - `git tag -a 0.xx -m "Release 0.xx"` -3. Push the new commit and tag to GitHub. - `git push origin master --tags` -4. Wait for [CircleCI](https://circleci.com/gh/telepresenceio/workflows/telepresence/tree/master) to finish. - Make sure all the test pass. -5. Download the tarball of deployable artifacts and unarchive into container in your project directory. It will populate the `dist` subdirectory. - `curl -s https://.../telepresence-dist.tbz | tar xjf -` -6. Set up release credentials in the environment: - `. /keybase/team/datawireio/tel-release-secrets.sh` - * `HOMEBREW_KEY` to push to GitHub - * `PACKAGECLOUD_TOKEN` to push Linux packages - * `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for AWS S3 access -7. Run the release script. - `ci/release.sh` -8. Log in to [Netlify](https://app.netlify.com/teams/telepresence/sites) and rebuild the Telepresence website. - Select "Deploys" then "Trigger Deploy" then "Clear cache and deploy site" to get a clean build. - Without this step, more often than not, the website will display an old version number. -9. Post the release announcement on Slack et al. - The release script outputs the announcement, or you can find it in `dist/announcement.md`. - -#### What the release script does - -1. Build and launch a Docker container with required tools. -2. Upload Linux packages to PackageCloud using the script that's generated by the deployment script. -3. Update the Homebrew formula in [homebrew-blackbird](https://github.com/datawire/homebrew-blackbird). - The Homebrew formula refers to the tarball GitHub [generates for each release](https://github.com/telepresenceio/telepresence/releases). -4. Push the Scout blobs to the `datawire-static-files` S3 bucket. - [In the future](https://github.com/telepresenceio/telepresence/issues/285), Telepresence will be able to inform users that a new version is available using this data. - - -### Running tests - -#### Full test suites - -In order to run *all* possible code paths in Telepresence, you need to do the following: - -| Test environment | How to run | -|--------------------|------------------------------------------------------| -| Minikube | `make minikube-test` | -| Remote K8s cluster | Runs on Circle | -| Minishift | `make openshift-tests` with minishift kube context | -| Remote OS cluster | `make openshift-tests` with remote OpenShift context | -| Docker on Mac | `make minikube-test` on Mac with Docker | -| Other Mac | Runs on Circle | - -In practice running on remote OpenShift cluster usually doesn't happen. - -CircleCI on Mac does not yet support Docker, which is why that needs to be done manually. - -#### Running individual tests - -When doing local development you will typically run all tests by doing: - -> `make minikube-test` - -If you want to only run some tests you can pass arguments to the underlying `py.test` run using `TELEPRESENCE_TESTS`. 
-For example, to run all tests containing the string "fromcluster" and to exit immediately after first failed test: - -> `TELEPRESENCE_TESTS="-x -k fromcluster" make minikube-test` - -See `py.test --help` for other options you might want to set in `TELEPRESENCE_TESTS`. - -### Running a local copy of `telepresence` - -FIXME: This is out-of-date. The above section of setting up a development environment has the correct info, but lacks a clear example like this section has. - -During local development, typically against minikube, you will want to manually run `telepresence` you are working on. -You need to: - -1. Make sure `minikube` has latest server-side Docker image: `make build-k8s-proxy-minikube` will do this. It has issues on Mac, however, due to old version of `make` maybe? Read the `Makefile` to see what it does. - If you forget this step you will have problems with Minikube not finding the Docker image for `telepresence-k8s`. -2. If you're using `--docker-run`, your local Docker needs to have the latest Docker image: `make build-local`. -3. You need to run `cli/telepresence` with env variable telling it the version number it should be using; this will be used as the tag for Docker images you created in steps 1 and 2. You do this by setting `TELEPRESENCE_VERSION` to the output of `make version`. You also need to set `PATH` so `sshuttle-telepresence` is found. - -For example: - -```console -$ cli/telepresence --version -0.61 - -$ make version -0.61-1-gadd8818 - -$ make build-k8s-proxy-minikube -... - -$ env PATH=$PATH:$PWD/virtualenv/bin/ TELEPRESENCE_VERSION=$(make version) \ - cli/telepresence --version -0.61-1-gadd8818 - -$ env PATH=$PATH:$PWD/virtualenv/bin/ TELEPRESENCE_VERSION=$(make version) \ - cli/telepresence --run-shell -@minikube|$ ... -``` diff --git a/docs/v1/reference/install.md b/docs/v1/reference/install.md deleted file mode 100644 index 4e6f1091..00000000 --- a/docs/v1/reference/install.md +++ /dev/null @@ -1,23 +0,0 @@ -import * as Macros from "../macros"; - -# Installing Telepresence - - - -### Dependencies - -If you install Telepresence using a pre-built package, dependencies other than [`kubectl`][k] are handled by the package manager. If you install from source, you will also need to install the following software. - -[k]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ - -- `kubectl` (OpenShift users can use `oc`) -- Python 3.5 or newer -- OpenSSH (the `ssh` command) -- `sshfs` to mount the pod's filesystem -- `conntrack` and `iptables` on Linux for the vpn-tcp method -- `torsocks` for the inject-tcp method -- Docker for the container method -- `sudo` to allow Telepresence to - - modify the local network (via `sshuttle` and `pf`/`iptables`) for the vpn-tcp method - - run the `docker` command in some configurations on Linux - - mount the remote filesystem for access in a Docker container diff --git a/docs/v1/reference/limitations.md b/docs/v1/reference/limitations.md deleted file mode 100644 index e7628a77..00000000 --- a/docs/v1/reference/limitations.md +++ /dev/null @@ -1,54 +0,0 @@ -# Troubleshooting & Workarounds - -### Method-specific limitations - -For method-specific limitations see the documentation on the [available proxying methods](/reference/methods.html). - -### General limitations & workarounds - -#### Docker containers - -When using `--method vpn-tcp` or `--method inject-tcp` a container run via `docker run` will not inherit the outgoing functionality of the Telepresence shell. 
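-
-As a concrete illustration (the deployment and service names below are placeholders, and the exact failure mode depends on the method and platform), an in-cluster hostname that resolves inside the Telepresence shell will typically not resolve inside a container started from that same shell:
-
-```console
-$ telepresence --swap-deployment myservice --run-shell
-@minikube|$ curl -s http://helloworld.default            # proxied: reaches the cluster Service
-@minikube|$ docker run --rm curlimages/curl -s http://helloworld.default
-curl: (6) Could not resolve host: helloworld.default     # the container bypasses the proxy
-```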
-If you want to use Telepresence to proxy a containerized application you should use [`--method container`](/tutorials/docker.html). - -#### `localhost` and the pod - -`localhost` and `127.0.0.1` will access the host machine, i.e. the machine where you ran `telepresence`. -If you're using the container method, `localhost` will refer to your local container. -In either case, `localhost` will not connect you to other containers in the pod. -This can be a problem in cases where you are running multiple containers in a pod and you need your process to access a different container in the same pod. - -The solution is to use `--to-pod PORT` and/or `--from-pod PORT` to tell Telepresence to set up additional forwarding. -If you want to make connections from your local session to the pod, e.g., to access a proxy/helper sidecar, use `--to-pod PORT`. -On the other hand, if you want connections from other containers in the pod to reach your local session, use `--from-pod PORT`. - -An alternate solution to connect to the pod is to access the pod via its IP, rather than at `127.0.0.1`. -You can have the pod IP configured as an environment variable `$MY_POD_IP` in the Deployment using the Kubernetes [Downward API](https://kubernetes.io/docs/tasks/configure-pod-container/environment-variable-expose-pod-information/): - -
-apiVersion: extensions/v1beta1
-kind: Deployment
-spec:
-  template:
-    spec:
-      containers:
-      - name: servicename
-        image: datawire/telepresence-k8s:$version$
-        env:
-        - name: MY_POD_IP
-          valueFrom:
-            fieldRef:
-              fieldPath: status.podIP
-
- -#### Fedora 18+/CentOS 7+/RHEL 7+ and `--docker-run` - -Fedora 18+/CentOS 7+/RHEL 7+ ship with firewalld enabled and running by default. In its default configuration this will drop traffic on unknown ports originating from Docker's default bridge network - usually `172.17.0.0/16`. - -To resolve this issue, instruct firewalld to trust traffic from `172.17.0.0/16`: - -``` -sudo firewall-cmd --permanent --zone=trusted --add-source=172.17.0.0/16 -sudo firewall-cmd --reload -``` - -For more details see [issue # 464](https://github.com/telepresenceio/telepresence/issues/464). diff --git a/docs/v1/reference/methods.md b/docs/v1/reference/methods.md deleted file mode 100644 index 403db5f7..00000000 --- a/docs/v1/reference/methods.md +++ /dev/null @@ -1,81 +0,0 @@ -# Proxying methods - -### Choosing a proxying method - -Telepresence has three different proxying methods; you will need to choose one of them. - -1. `--method inject-tcp` works by injecting a shared library into the subprocess run by Telepresence using `--run` and `--run-shell`. -2. `--method vpn-tcp` works by using a program called [sshuttle](https://sshuttle.readthedocs.io) to open a VPN-like connection to the Kubernetes cluster. -3. `--method container` is documented in the [Docker tutorial](/tutorials/docker.html). - -In general `vpn-tcp` should work in more cases, and it is chosen by default (unless `--docker-run` is used, in which case the `container` method is the default.) -If you want to run more than one telepresence connection per machine, or if you don't want proxying to affect all processes, use `inject-tcp`. - -You can read about the specific limitations of each method below, and read about the differences in what they proxy in the documentation of [what gets proxied](/reference/proxying.html). - -### Limitations: `--method vpn-tcp` - -`--method vpn-tcp` should work with more programs (and programming languages) than `--method inject-tcp`. -For example, if you're developing in Go you'll want to stick to this method. - -This method does have some limitations of its own, however: - -* Fully qualified Kubernetes domains like `yourservice.default.svc.cluster.local` won't resolve correctly on Linux. - `yourservice` and `yourservice.default` will resolve correctly, however. - See [the relevant ticket](https://github.com/telepresenceio/telepresence/issues/161) for details. -* Only one instance of `telepresence` using `vpn-tcp` should be running at a time on any given developer machine. Running other - instances with other proxying methods concurrently is possible. -* VPNs may interfere with `telepresence`, and vice-versa: don't use both at once. -* Cloud resources like AWS RDS will not be routed automatically via cluster. - You'll need to specify the hosts manually using `--also-proxy`, e.g. `--also-proxy mydatabase.somewhere.vpc.aws.amazon.com` to route traffic to that host via the Kubernetes cluster. Specify multiple hosts to `--also-proxy` like `--also-proxy example1.com --also-proxy example2.com --also-proxy example3.com`. - -### Limitations: `--method inject-tcp` - -If you're using `--method inject-tcp` you will have certain limitations. - -#### Incompatible programs - -Because of the mechanism Telepresence uses to intercept networking calls when using `inject-tcp`: - -* suid binaries won't work inside a Telepresence shell. -* Statically linked binaries won't work. -* Custom DNS resolvers that parse `/etc/resolv.conf` and do DNS lookups themselves won't work. 
-
-Thus command line tools like `ping`, `nslookup`, `dig`, `host` and `traceroute` won't work either because they do lower-level DNS or are suid.
-
-However, this only impacts outgoing connections.
-Incoming proxying (from Kubernetes) will still work with these binaries.
-
-#### Golang
-
-Programs written with the Go programming language will not work by default with this method.
-We recommend using `--method vpn-tcp` instead if you're writing Go, since that method will work with Go.
-
-`--method inject-tcp` relies on injecting a shared library into processes you run, and Go uses a custom system call implementation and has its own DNS resolver.
-This causes connections *to* Kubernetes not to work.
-On OS X many Go programs won't start at all, including `kubectl`.
-
-If you don't want to use `--method vpn-tcp` for some reason you can also work around these limitations by doing the following in your development environment (there is no need to change anything for production):
-
-* Use `gccgo` instead of `go build`.
-* Do `export GODEBUG=netdns=cgo` to [force Go to use the standard DNS lookup mechanism](https://golang.org/pkg/net/#hdr-Name_Resolution) rather than its own internal one.
-
-But the easiest thing to do, again, is to use `--method vpn-tcp`, which *does* work with Go.
-
-#### MacOS System Integrity Protection
-
-In OS X El Capitan (10.11), Apple introduced a security feature called System Integrity Protection (SIP).
-
-* [Apple's _About SIP on your Mac_ article](https://support.apple.com/en-us/HT204899)
-* [Apple's SIP guide for developers](https://developer.apple.com/library/content/documentation/Security/Conceptual/System_Integrity_Protection_Guide/Introduction/Introduction.html#//apple_ref/doc/uid/TP40016462-CH1-DontLinkElementID_15)
-* [Wikipedia article about SIP](https://en.wikipedia.org/wiki/System_Integrity_Protection)
-
-SIP prevents, among other things, code injection into processes that originate from certain designated "protected directories" (including `/usr` and `/bin`). This includes [purging dynamic linker environment variables](https://developer.apple.com/library/content/documentation/Security/Conceptual/System_Integrity_Protection_Guide/RuntimeProtections/RuntimeProtections.html) for these processes. These protections are in place even when running as root. They can only be disabled by booting into recovery mode, and disabling them is highly discouraged.
-
-Telepresence attempts to work around SIP to some extent by creating duplicates of `/bin`, `/usr/bin`, etc. and putting those in the `PATH` instead of the SIP-protected originals. This allows the user to type something like `env ENABLE_FROBULATE=1 ./my_binary` and get the benefits of `inject-tcp`; the `env` binary comes from an unprotected location in `/tmp`.
-
-What _does not_ work is using the full path to `/bin/sh` or `/usr/bin/env` or similar, since Telepresence cannot manipulate those commands when located in those directories. In practice, avoiding protected binaries is difficult because it is common for tools and scripts to use `sh` or `env` by full path, thereby losing Telepresence's injected libraries. As a result, connections _to_ Kubernetes do not work.
-
-Carefully avoiding protected binaries is the only reliable workaround. One hackish approach would be to create a directory tree containing copies of the binaries in the protected directories in a stable location (e.g., `~/bin_copy`).
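-
-A rough sketch of that hackish approach, shown only to make the idea concrete (and not recommended over simply switching to `--method vpn-tcp`):
-
-```console
-$ mkdir -p ~/bin_copy
-$ cp /bin/* /usr/bin/* ~/bin_copy/ 2>/dev/null   # duplicate the SIP-protected binaries
-$ export PATH=~/bin_copy:$PATH                   # prefer the unprotected copies
-```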
That would allow changing all tools and scripts to use those unprotected copies; this would have to be done in production as well as on your development machine. It would be much easier to use `--method vpn-tcp` instead. - -See [issue 268](https://github.com/telepresenceio/telepresence/issues/268) for one user's experience. diff --git a/docs/v1/reference/proxying.md b/docs/v1/reference/proxying.md deleted file mode 100644 index 854c1588..00000000 --- a/docs/v1/reference/proxying.md +++ /dev/null @@ -1,146 +0,0 @@ -# What gets proxied - -### Networking access from the cluster - -If you use the `--expose` option for `telepresence` with a given port the pod will forward traffic it receives on that port to your local process. -This allows the Kubernetes or OpenShift cluster to talk to your local process as if it was running in the pod. - -By default the remote port and the local port match. -Here we expose port 8080 as port 8080 on a remote Deployment called `example`: - -```console -$ telepresence --expose 8080 --new-deployment example \ - --run python3 -m http.server 8080 -``` - -It is possible to expose a different local port than the remote port. -Here we expose port 8080 locally as port 80 on a remote Deployment called `example2`: - -```console -$ telepresence --expose 8080:80 --new-deployment example2 \ - --run python3 -m http.server 80 -``` - -You can't expose ports <1024 on clusters that don't support running images as `root`. -This limitation is the default on OpenShift. - -### Networking access to the cluster - -The locally running process wrapped by `telepresence` has access to everything that a normal Kubernetes pod would have access to. -That means `Service` instances, their corresponding DNS entries, and any cloud resources you can normally access from Kubernetes. - -To see this in action, let's start a `Service` and `Pod` called `"helloworld"` in Kubernetes in the default namespace `"default"`, and wait until it's up and running. -The resulting `Service` will have three DNS records you can use: - -1. `helloworld`, from a pod in the `default` namespace. -2. `helloworld.default` anywhere in the Kubernetes cluster. -3. `helloworld.default.svc.cluster.local` anywhere in the Kubernetes cluster. - This last form will not work when using `telepresence` with `--method=vpn-tcp` on Linux (see [the relevant ticket](https://github.com/telepresenceio/telepresence/issues/161) for details.) - -We'll check the current Kubernetes context and then start a new pod: - -```console -$ kubectl run --expose helloworld --image=nginx:alpine --port=80 -``` - -Wait 30 seconds and make sure a new pod is available in `Running` state: - -```console -$ kubectl get pod helloworld -NAME READY STATUS RESTARTS AGE -helloworld 1/1 Running 0 33s -``` - -Now you can send queries to the new `Service` as if you were running inside Kubernetes: - -```console -$ telepresence --run curl http://helloworld.default - - - -Welcome to nginx! -... -``` - -> **Having trouble?** Ask us a question in our [Slack chatroom](https://a8r.io/slack). - -### Networking access to cloud resources - -When using `--method=inject-tcp`, the subprocess run by `telepresence` will have *all* of its traffic routed via the cluster. -That means transparent access to cloud resources like databases that are accessible from the Kubernetes cluster's private network or VPC. -It also means public servers like `google.com` will be routed via the cluster, but again only for the subprocess run by `telepresence` via `--run` or `--run-shell`. 
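As a quick illustration of that scoping, reusing the `helloworld` Service created above (a sketch; your exact output will differ): the command wrapped by `telepresence` is routed via the cluster, while the same command run directly on your machine is not.

```console
$ # Inside the Telepresence subprocess: the cluster DNS name resolves and the
$ # request is routed through the cluster.
$ telepresence --method inject-tcp --run curl -I http://helloworld.default
$ # Outside Telepresence, the same name does not resolve, because only the
$ # wrapped subprocess is proxied.
$ curl -I http://helloworld.default
```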
 - -When using `--method=vpn-tcp` *all* processes on the machine running `telepresence` will have access to the Kubernetes cluster. -Cloud resources will only be routed via the cluster if you explicitly specify them using `--also-proxy`. -Access to public websites should not be affected or changed in any way. - -### Environment variables - -Environment variables set in the `Deployment` pod template will be available to your local process. -You also have access to all the environment variables Kubernetes sets automatically. -For example, here you can see the environment variables that get added for each `Service`: - -```console -$ telepresence --run env | grep KUBERNETES -KUBERNETES_PORT=tcp://10.0.0.1:443 -KUBERNETES_SERVICE_PORT=443 -KUBERNETES_PORT_443_TCP_ADDR=10.0.0.1 -KUBERNETES_PORT_443_TCP_PORT=443 -KUBERNETES_PORT_443_TCP_PROTO=tcp -KUBERNETES_PORT_443_TCP=tcp://10.0.0.1:443 -KUBERNETES_SERVICE_HOST=10.0.0.1 -``` - -### Volumes - -Volumes configured in the `Deployment` pod template will also be made available to your local process. -This will work better with read-only volumes with small files like `Secret` and `ConfigMap`; a local database server writing to a remote volume will be slow. - -Volume support requires a small amount of work on your part. -The root directory where all the volumes can be found will be set to the `TELEPRESENCE_ROOT` environment variable in the shell or subprocess run by `telepresence`. -You will then need to use that env variable as the root for volume paths you are opening. - -You can see an example of this in the [Volumes Howto](../../howto/volumes.html). - -### The complete list: what Telepresence proxies - -#### `--method inject-tcp` - -When using `--method inject-tcp`, Telepresence currently proxies the following: - -* The [special environment variables](https://kubernetes.io/docs/user-guide/services/#environment-variables) that expose the addresses of `Service` instances. - E.g. `REDIS_MASTER_SERVICE_HOST`. -* The standard [DNS entries for services](https://kubernetes.io/docs/user-guide/services/#dns). - E.g. `redis-master` and `redis-master.default.svc.cluster.local` will resolve to a working IP address. - These will work regardless of whether they existed when the proxy started. -* TCP connections to other `Service` instances, regardless of whether they existed when the proxy was started. -* Any environment variables that the `Deployment` explicitly configured for the pod. -* TCP connections to any hostname/port; all but `localhost` will be routed via Kubernetes. - Typically this is useful for accessing cloud resources, e.g. an AWS RDS database. -* TCP connections *from* Kubernetes to your local machine, for ports specified on the command line using `--expose`. -* Access to volumes, including those for `Secret` and `ConfigMap` Kubernetes objects. -* `/var/run/secrets/kubernetes.io` credentials (used to [access the Kubernetes API](https://kubernetes.io/docs/user-guide/accessing-the-cluster/#accessing-the-api-from-a-pod)). - -Currently unsupported: - -* SRV DNS records matching `Services`, e.g. `_http._tcp.redis-master.default`. -* UDP messages in any direction. - -#### `--method vpn-tcp` - -When using `--method vpn-tcp`, Telepresence currently proxies the following: - -* The [special environment variables](https://kubernetes.io/docs/user-guide/services/#environment-variables) that expose the addresses of `Service` instances. - E.g. `REDIS_MASTER_SERVICE_HOST`.
-* The standard [DNS entries for services](https://kubernetes.io/docs/user-guide/services/#dns). - E.g. `redis-master` and `redis-master.default`, but not those ending with `.local`. -* Any environment variables that the `Deployment` explicitly configured for the pod. -* TCP connections to any `Service` in the cluster regardless of when they were started, as well as to any hosts or ranges explicitly listed with `--also-proxy`. It is possible to specify multiple hosts to `--also-proxy` like so: `--also-proxy example1.com --also-proxy example2.com --also-proxy example3.com`. -* TCP connections *from* Kubernetes to your local machine, for ports specified on the command line using `--expose`. -* Access to volumes, including those for `Secret` and `ConfigMap` Kubernetes objects. -* `/var/run/secrets/kubernetes.io` credentials (used to [access the Kubernetes API](https://kubernetes.io/docs/user-guide/accessing-the-cluster/#accessing-the-api-from-a-pod)). - -Currently unsupported: - -* Fully qualified Kubernetes DNS names that end with `.local`, e.g. `redis-master.default.svc.cluster.local`, won't work on Linux (see [the relevant ticket](https://github.com/telepresenceio/telepresence/issues/161) for details.) -* UDP messages in any direction. diff --git a/docs/v1/reference/running.md b/docs/v1/reference/running.md deleted file mode 100644 index 38d21c34..00000000 --- a/docs/v1/reference/running.md +++ /dev/null @@ -1,25 +0,0 @@ -# Running local processes - -There are two ways you can have Telepresence run your local process. - -### `--run` - -`--run` takes one or more arguments and runs the resulting command, e.g. to run `ruby myserver.rb` you can do: - -```console -$ telepresence --run ruby myserver.rb -``` - -This process will have access to the environment variables, outgoing proxying and volumes proxied by Telepresence. -(Volumes aren't as transparent, sadly: see the [relevant docs](../proxying).) - -### `--run-shell` - -`--run-shell` takes no arguments; it simply runs a `bash` process: - -```console -$ telepresence --run-shell -@minikube|$ -``` - -Any process run inside the shell will have access to the environment variables, outgoing proxying and volumes proxied by Telepresence. diff --git a/docs/v1/reference/upgrade.md b/docs/v1/reference/upgrade.md deleted file mode 100644 index ebc4495f..00000000 --- a/docs/v1/reference/upgrade.md +++ /dev/null @@ -1,34 +0,0 @@ -# Upgrading Telepresence - -#### OS X -On OS X you can upgrade Telepresence by running the following: - -```shell -brew upgrade datawire/blackbird/telepresence-legacy -``` - -#### Ubuntu -Run the following to upgrade Telepresence: - -```shell -sudo apt update -sudo apt install --no-install-recommends telepresence -``` - -#### Fedora -Run the following to upgrade Telepresence: - -```shell -sudo dnf upgrade telepresence -``` - -### Telepresence 2 for Teams -Telepresence 2 is open source and available to [use now](/docs/latest/quick-start/). Rewritten in Go to be faster and more resilient, Telepresence 2 has a [new architecture](/docs/latest/reference/architecture/) to better serve Kubernetes development teams with multiple users. [Try it today](/docs/latest/quick-start/) and [share your feedback](https://a8r.io/slack) with the team. - -### More - -Take a look at the [changelog](../changelog) to see what's new. - -See what others are up to, including tools for scaling Telepresence to your teams, on the [community page](/community). - -Get involved!
Find us in the [Slack chatroom](https://a8r.io/slack) or [submit a pull request](https://github.com/telepresenceio/telepresence/pulls). diff --git a/docs/v1/reference/usage_reporting.md b/docs/v1/reference/usage_reporting.md deleted file mode 100644 index 16c133ad..00000000 --- a/docs/v1/reference/usage_reporting.md +++ /dev/null @@ -1,36 +0,0 @@ -# Usage Reporting - -Telepresence collects some basic information about its users so it can send important client notices, such as new version availability and security bulletins. -We also use the information to aggregate basic usage analytics anonymously. - -## Why? - -- We want to know how you are using our software, so we can make it better for you. - Knowing what versions are being used, in aggregate, is very helpful for development and testing. -- We ship new releases frequently, with new features and bug fixes. - We want you to know when we ship a new release. - -## What is reported? - -The following information is collected and sent during version checks: - -- Application name ("telepresence") -- Application version -- Installation identifier (locally generated for only Telepresence and stored in `~/.config/telepresence/id`) -- Platform information (operating system, Python version) -- Kubernetes version -- Operation (e.g., "swap-deployment") and method (e.g., "vpn-tcp") - -The reporting code can be found in [`telepresence/usage_tracking.py`][1]. - -[1]: https://github.com/telepresenceio/telepresence/blob/master/telepresence/usage_tracking.py - -## When is it reported? - -Telepresence collects and reports usage every time a session is launched. - -## Can it be disabled? - -Yes. Set the environment variable `SCOUT_DISABLE`. - - export SCOUT_DISABLE=1 diff --git a/docs/v1/reference/windows.md b/docs/v1/reference/windows.md deleted file mode 100644 index a116f971..00000000 --- a/docs/v1/reference/windows.md +++ /dev/null @@ -1,18 +0,0 @@ -# Windows support - -If you are running Windows 10 Creators Edition (released April 2017), you have access to the Windows Subsystem for Linux. -This allows you to run Linux programs transparently inside Windows, with access to the normal Windows filesystem. -Some older versions of Windows also had WSL, but those were based off Ubuntu 14.04 and will not work with Telepresence. - -To run Telepresence inside WSL: - -1. Install [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install). -2. Start the BASH.exe program. -3. Install Telepresence by following the Ubuntu instructions above. - -Caveats: - -* At the moment volumes are not supported on Windows, but [we plan on fixing this](https://github.com/telepresenceio/telepresence/issues/115). -* Only the `inject-tcp` method is supported. -* Network proxying won't affect Windows binaries. - You can however edit your files in a Windows program (and compile Java or .NET packages), and then run them with the Linux interpreters or VMs. diff --git a/docs/v1/tutorials/docker.md b/docs/v1/tutorials/docker.md deleted file mode 100644 index b6bf282d..00000000 --- a/docs/v1/tutorials/docker.md +++ /dev/null @@ -1,161 +0,0 @@ -import * as Macros from "../macros"; - -# Fast development workflow with Docker and Kubernetes - -Keeping development environments in sync is a constant pain. 
Containerizing your development environment enables your service to run in the exact same environment everywhere, from your laptop to production (for more details on the benefits of a container native development workflow, see [this post by Matt Butcher](https://open.microsoft.com/2018/04/23/5-reasons-you-should-be-doing-container-native-development/).) - -Telepresence, in conjunction with a containerized development environment, gives the developer a fast development workflow in developing a multi-container application on Kubernetes. Telepresence lets you run a Docker container locally while proxying it to your Kubernetes cluster. - -In this HOWTO, we'll walk through how to use Telepresence with a containerized Docker environment to build a fast development workflow. - - - -## Quick example - -We'll start with a quick example. Apply [this manifest](https://github.com/telepresenceio/telepresence/blob/master/docs/tutorials/hello-world.yaml) -to create a deployment and service both named hello-world, exposed on port 8000. -Then confirm that the deployment becomes ready: - -```console -$ kubectl apply -f https://raw.githubusercontent.com/telepresenceio/telepresence/master/docs/tutorials/hello-world.yaml -deployment.apps/hello-world created -service/hello-world created - -$ kubectl get deployments -NAME READY UP-TO-DATE AVAILABLE AGE -deployment.apps/hello-world 1/1 1 1 6s -``` - -It may take a minute or two for the pod running the server to be up and running, -depending on how fast your cluster is. - -You can now run a Docker container using Telepresence that can access that -service, even though the process is local but the service is running in the -Kubernetes cluster: - -```console -$ telepresence --docker-run --rm -it pstauffer/curl curl http://hello-world:8000/ -[...] -T: Setup complete. Launching your container. -Hello, world! -T: Your process has exited. -[...] -``` - - -## Setting up a development environment in Docker - -So how would we use Telepresence to do actual *development* of the hello-world service? We'll set up a local Dockerized development environment for hello-world. Clone the hello-world repo: - -```console -$ git clone https://github.com/datawire/hello-world -Cloning into 'hello-world'... -[...] -$ cd hello-world -``` - -In the repository is a [`Dockerfile`](https://github.com/datawire/hello-world/blob/master/Dockerfile) that builds a runtime environment for the hello-world service. - -Build the runtime environment and tag it `hello-dev`: - -```console -$ docker build -t hello-dev . -Sending build context to Docker daemon 24.58kB -Step 1/7 : FROM python:3-alpine - ---> a93594ce93e7 -[...] - ---> 7d692d619894 -Successfully built 7d692d619894 -Successfully tagged hello-dev:latest -``` - -We'll use Telepresence to swap the hello-world deployment with the local Docker image. Behind the scenes, Telepresence invokes `docker run`, so it supports any arguments you can pass to `docker run`. In this case, we're going to also mount our local directory to `/usr/src/app` in your Docker container. Make sure your current working directory is the `hello-world` directory, since we're going to mount that directly into the container. - -```console -$ telepresence --swap-deployment hello-world --docker-run --rm -it -v $(pwd):/usr/src/app hello-dev -T: Volumes are rooted at $TELEPRESENCE_ROOT. See https://telepresence.io/howto/volumes.html for details. 
-T: Starting network proxy to cluster by swapping out Deployment hello-world with a proxy -T: Forwarding remote port 8000 to local port 8000. - -T: Setup complete. Launching your container. - * Serving Flask app "server" (lazy loading) -[...] -``` - -We can test this out. In another terminal, we'll start a pod remotely on the Kubernetes cluster. - -```console -$ kubectl run curler -it --rm --image=pstauffer/curl --restart=Never -- sh -If you don't see a command prompt, try pressing enter. -/ # curl http://hello-world:8000 -Hello, world! -/ # -``` - -Let's change the message in `server.py`. At a shell prompt in the `hello-world` directory, modify the file using `sed`: - -```console -$ sed -i.bak -e s/Hello/Greetings/ server.py -[no output] -``` - -or just use your editor to change the file. The change we have made is very simple: - -```console -$ git diff -diff --git a/server.py b/server.py -index 04f15e2..7fffeb1 100644 ---- a/server.py -+++ b/server.py -@@ -1,7 +1,7 @@ - from flask import Flask - - PORT = 8000 --MESSAGE = "Hello, world!\n" -+MESSAGE = "Greetings, world!\n" - - app = Flask(__name__) - -``` - -Rerun the `curl` command from your remote pod: - -```console -/ # curl http://hello-world:8000 -Greetings, world! -/ # -``` - -Notice how the output has updated in realtime. Congratulations! You've now: - -* Routed the hello-world service to the Docker container running locally -* Configured your Docker service to pick up changes from your local filesystem -* Made a live code edit and seen it immediately reflected in production - -## How it works - -Telepresence will start a new proxy container and then call `docker run` with whatever arguments you pass to `--docker-run` to start a container that will have its networking proxied. All networking is proxied: - -* Outgoing to Kubernetes. -* Outgoing to cloud resources outside the cluster -* Incoming connections from the cluster to ports specified with `--expose`. - -Volumes and environment variables from the remote `Deployment` are also available in the container. - -## Cleaning up and next step - -* Quit your remote pod shell (`exit`) to clean up that pod. -* Press Ctrl-C at your Telepresence terminal. Telepresence will swap the deployment back to its original state. -* In a real development situation, you would commit your development work and let CI do its thing. Or build and deploy your changes however you normally would. 
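The same workflow also works when there is no existing Deployment to swap: `--new-deployment` plus `--expose` creates a fresh proxy Deployment and forwards the port to your container. A rough sketch using the `hello-dev` image built above (the deployment name here is hypothetical):

```console
$ telepresence --new-deployment hello-world-dev --expose 8000 \
    --docker-run --rm -it -v $(pwd):/usr/src/app hello-dev
```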
- - - - diff --git a/docs/v1/tutorials/hello-world.yaml b/docs/v1/tutorials/hello-world.yaml deleted file mode 100644 index 00966164..00000000 --- a/docs/v1/tutorials/hello-world.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world -spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: datawire/hello-world - name: hello-world - ports: - - containerPort: 8000 - ---- - -apiVersion: v1 -kind: Service -metadata: - labels: - app: hello-world - name: hello-world -spec: - ports: - - port: 8000 - protocol: TCP - targetPort: 8000 - selector: - app: hello-world diff --git a/docs/v1/tutorials/intellij.md b/docs/v1/tutorials/intellij.md deleted file mode 100644 index 3d91eeab..00000000 --- a/docs/v1/tutorials/intellij.md +++ /dev/null @@ -1,47 +0,0 @@ -# Debugging a Java Rate Limiter Service using Telepresence and IntelliJ IDEA - -## Debugging Locally Against a Remote Kubernetes Cluster using Telepresence - -The fundamental benefit of Telepresence is that it provides a [two-way proxy](../../discussion/why-telepresence#a-fast-development-cycle-with-telepresence) between your local machine and the remote cluster. This means that you can run a service locally (and all of your local debug tooling) and have this service interact with all the other services in the remote cluster. This allows you to make a request against a service running (and exposed) in the remote cluster and proxy a call to a downstream dependent service to your local machine. You can then inspect and modify the request before providing the response from your local machine back into the calling remote service. - -### A Brief Video Guide - -The video below provides more information, and also demonstrates how to debug a locally run Rate Limiter service (that is integrated with a remotely deployed Ambassador API Gateway) via Telepresence and IntelliJ IDEA. All of the example code can be found in my [Docker Java Shopping](https://github.com/danielbryantuk/oreilly-docker-java-shopping) sample microservices-based application on GitHub. - -https://youtu.be/74ZJ1GKoZiU - -### Video Instruction TL;DR - -Once you have provisioned a remote Kubernetes cluster you will need to provide your user account with the “cluster-admin” RBAC role and then deploy all of the services configurations within the [kubernetes-ambassador-telepresence](https://github.com/danielbryantuk/oreilly-docker-java-shopping/tree/master/kubernetes-ambassador-telepresence) directory. I’ve included my example kubectl commands below (which are executed against a remote Kubernetes cluster running via [Google’s GKE](https://cloud.google.com/kubernetes-engine/)): - -``` -$ # Assume Kubernetes cluster has been successfully provisioned -$ # -$ kubectl create clusterrolebinding my-cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud info --format="value(config.account)") -$ # -$ git clone git@github.com:danielbryantuk/oreilly-docker-java-shopping.git -$ cd oreilly-docker-java-shopping/kubernetes-ambassador-telepresence -$ kubectl apply -f . 
-``` - -When all of the services have been deployed successfully, you can use Telepresence to “swap” the remotely running `ratelimiter` deployment with a proxy that will forward all network communications to/from the service that you will run locally: - -``` -$ telepresence --swap-deployment ratelimiter --env-json ratelimit_env.json -``` - -You’ll notice that I have specified the `env-json` argument with a filename, which generates a `ratelimit_env.json` file that contains all the relevant Kubernetes cluster environment variables you will need for local debugging. - -## Configuring IntelliJ with Telepresence - -In order to load the generated Env File into IntelliJ, you will need to install the [Env File plugin](https://plugins.jetbrains.com/plugin/7861-env-file). This can be downloaded and installed via the JetBrains website, or you can also install it via the “Preferences -> Plugins” configuration of the IDE itself. - -With the plugin installed, you can now clone the [Ambassador Java Rate Limiter](https://github.com/danielbryantuk/ambassador-java-rate-limiter) Java code from GitHub and open this in IntelliJ. The video shows exactly how to configure IntelliJ IDEA, but the primary task is to modify the Run/Debug Configuration to load the Env File that was generated during the Telepresence `swap-deployment` command: - -![intelliJ-tutorial](https://www.datawire.io/wp-content/uploads/2018/07/intelliJ-tutorial.png) - -With the configuration updated, you can now start the local instance of the `RateLimiter` service in debug mode, and make a request against the remote Kubernetes cluster Shopfront endpoint. Once the request is made, the first breakpoint you have set should be triggered! From here you can debug the locally running service as if it were running within the remote Kubernetes cluster. - -## Looking for More Info on Rate Limiting with Ambassador and Kubernetes? - -Check out our [series on rate limiting](https://blog.getambassador.io/tagged/rate-limit-series) with the [Ambassador API Gateway](https://www.getambassador.io/). diff --git a/docs/v1/tutorials/java.md b/docs/v1/tutorials/java.md deleted file mode 100644 index 4d7803e7..00000000 --- a/docs/v1/tutorials/java.md +++ /dev/null @@ -1,117 +0,0 @@ -import * as Macros from "../macros"; - -# Local development with Java -*Author: Cesar Tron-Lozai ([@CesarTronLozai](https://twitter.com/cesarTronLozai))* - - - -### Java - -`Telepresence` can help you speed up your development process for any technology, as long as you deploy your service as a Docker image into a Kubernetes container. - -In this tutorial we will focus on how to set up a local development environment for a (micro)-service `Foo` written in Java. - -This is very useful if your application is formed of many such services which cannot run on a single development machine, in which case it's easy to set up a separate Kubernetes cluster dedicated to development. - -`Telepresence` will help us locally develop our service `Foo` as if it was still inside the Kubernetes cluster. It's a win-win!! - -### Architecture - -The idea is quite simple: `Telepresence` will start a Docker container on your local machine, remove the running pod for `Foo` and replace it with a two-way proxy to your local docker container. - -If other services in your cluster want to talk to `Foo`, they'll get redirected to your local process. If your local process wants to talk to any other services running in your cluster, `Telepresence` will redirect the calls to your cluster.
 -It will also maintain all the environment variables defined in your deployment. It's magical. - -In order to run our Java application in a local Docker container, we can simply start a container which has Java and Maven/Gradle installed, mount the source directory containing our code, and do a Maven/Gradle build. - -In this tutorial we will be using Maven and a Spring Boot project, but this would work exactly the same with Gradle or any other Java framework. - -### Building inside a docker container - -As mentioned above, the goal is to compile and run our code inside a Docker container which `Telepresence` can use to replace the pod running in your cluster. - -Let's build the command step by step. - -* `telepresence` Runs `Telepresence`! -* `--swap-deployment foo` Assumes we already have a `foo` deployment running in our cluster. For different options check the documentation! -* `--docker-run` Tells `Telepresence` to run a Docker container -* `--rm` Tells Docker to discard our image when it terminates (no need to clutter your computer) -* `-v$(pwd):/build` Mounts the current directory (result of the `pwd` command) into a `/build` folder inside the Docker container. This is where your source code will be; in this case used by Maven. -* `-v $HOME/.m2/repository:/m2` Mounts the Maven cache folder so we don't have to download Maven artifacts every time we run the container -* `-p 8080:8080` That's optional. If your container is running a server on, for example, port `8080`, you can map that port if you need to make requests to your service directly -* `maven:3.5.3-jdk-8-slim` That's the image which we will use to build and run our service. Here this is a prebuilt image containing Maven and Java 8. Use any other image to match your needs -* `mvn -Dmaven.repo.local=/m2 -f /build spring-boot:run` Command to be run in the Docker container. Here it uses the Spring Boot Maven plugin, but you can use whatever command is required by your build tool. It tells Maven to point to the mounted repository cache and where the source code is located - -And that's it! You can easily create a `telepresence.sh` file in the root of your project with the following: - -> telepresence.sh -> ```bash -> telepresence --swap-deployment foo --docker-run --rm -v$(pwd):/build -v $HOME/.m2/repository:/m2 -p 8080:8080 maven-build:jdk8 mvn -Dmaven.repo.local=/m2 -f /build spring-boot:run -> -> ``` - -### Kubernetes Client - -For more details about how to connect to Kubernetes using a Kubernetes client library and a service account, check the [documentation](/tutorials/kubernetes-client-libs.html) - -### Debugging your code - -If you need to debug your code with your favourite IDE, that's super easy too. You only need to pass a JVM argument and forward the remote port: - -* `-e MAVEN_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005` (JDK 9+) - `-e MAVEN_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005` (JDK 5-8) - Creates a Docker environment variable that Maven will use to set a JVM argument; the JVM then waits for a remote connection on port `5005`. -* `-p 5005:5005` Tells Docker to forward that port from your local machine.
 - -Then you can use your IDE to start a remote debug session on your local port `5005`. - -> telepresence-debug.sh -> ```bash -> telepresence --swap-deployment foo --docker-run --rm -e MAVEN_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005 -v$(pwd):/build -v $HOME/.m2/repository:/m2 -p 8080:8080 -p 5005:5005 maven-build:jdk8 mvn -Dmaven.repo.local=/m2 -f /build spring-boot:run -> -> ``` - -### Hot code replace - -If you have a [JRebel Licence](https://zeroturnaround.com/software/jrebel/) you can also integrate it with Telepresence. - -Normally you would need to use JRebel remote when your application is running inside a Docker container. However, your Docker container shares the source folder, so you can use that directly. - -First you need to create a `rebel.xml` file that will tell JRebel where the source code is, that is, in the `/build` folder (the snippet below is a reconstructed example; adjust the schema version and paths for your JRebel setup): - -```xml -<?xml version="1.0" encoding="UTF-8"?> -<!-- Reconstructed example configuration --> -<application xmlns="http://www.zeroturnaround.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.zeroturnaround.com http://update.zeroturnaround.com/jrebel/rebel-2_1.xsd"> -  <classpath> -    <dir name="/build/target/classes"/> -  </classpath> -</application> -``` - -You should copy `rebel.xml` into your `src/main/resources` folder. - -If you haven't downloaded JRebel yet, do so [here](https://zeroturnaround.com/software/jrebel/download/) and unzip it somewhere. - -You can create a `JREBEL` environment variable that points to this folder. That means `$JREBEL/jrebel.jar` should be a valid file. - -To activate JRebel, you need the following: - -* `-v $JREBEL:/jrebel` Mounts the JRebel folder -* `-v $JREBEL/jrebel.jar:/jrebel.jar` Makes `jrebel.jar` available inside the container at `/jrebel.jar` -* `-v $HOME/.jrebel:/root/.jrebel` Mounts your JRebel home folder; this gives access to the licence and JRebel stats. This assumes the home folder of the process in your docker image is `/root`; change if required -* `-Drun.jvmArguments="-agentpath:/jrebel/lib/libjrebel64.so"` Tells the JVM to use the Linux64 JRebel agent - -> telepresence-jrebel.sh -> ```bash -> telepresence --swap-deployment foo --docker-run --rm -v $JREBEL:/jrebel -v $JREBEL/jrebel.jar:/jrebel.jar -v $HOME/.jrebel:/root/.jrebel -v$(pwd):/build -v $HOME/.m2/repository:/m2 -p 8080:8080 maven-build:jdk8 mvn -Drun.jvmArguments="-agentpath:/jrebel/lib/libjrebel64.so" -Dmaven.repo.local=/m2 -f /build spring-boot:run -> -> ``` - -### Example -If you want to see a simple Spring Boot project using Telepresence, have a look at https://github.com/cesartl/telepresence-k8s diff --git a/docs/v1/tutorials/kubernetes-client-libs.md b/docs/v1/tutorials/kubernetes-client-libs.md deleted file mode 100644 index bf2ffb86..00000000 --- a/docs/v1/tutorials/kubernetes-client-libs.md +++ /dev/null @@ -1,61 +0,0 @@ -import * as Macros from "../macros"; - -# Local Connection to Kubernetes Client Libraries -*Author: Guray Yildirim ([@gurayyildirim](https://twitter.com/gurayyildirim))* - - - -### Intro - -Kubernetes has client libraries in many different languages. It is not rare to have situations that require connecting to the Kubernetes API from within your cluster to get resources, create new pods and deployments, and so on. Kubernetes provides ServiceAccount objects in its RBAC model to fill this need. Still, development from local computers, testing, and debugging become a pain due to the lack of direct access to the cluster API using a token. - -Using Telepresence, it becomes easy to access the ServiceAccount token seamlessly with your client libraries.
Here are links to jump ahead: - -- [Java Kubernetes Client Local Connection](#java-kubernetes-client) -- [Python Kubernetes Client Local Connection](#python-kubernetes-client) - -### Java Kubernetes Client - -If you are using a Kubernetes client like this [one](https://github.com/fabric8io/kubernetes-client), you need to make sure the client can access service account information. This can be done with the `--mount` option introduced in `Telepresence 0.85`. - -We need to add the following to the command: - -* `--mount /tmp/known` Tells `Telepresence` to mount `TELEPRESENCE_ROOT` to a known folder -* `-v=/tmp/known/var/run/secrets:/var/run/secrets` This is another Docker mounting command to mount the known folder to `/var/run/secrets` in the local container. The [Fabric8 Kubernetes client](https://github.com/fabric8io/kubernetes-client) can find the secrets there as it would inside Kubernetes. - -So our `telepresence.sh` file would look like this: - -> telepresence.sh -> ```bash -> telepresence --mount /tmp/known --swap-deployment foo --docker-run --rm -v$(pwd):/build -v $HOME/.m2/repository:/m2 -v=/tmp/known/var/run/secrets:/var/run/secrets -p 8080:8080 maven-build:jdk8 mvn -Dmaven.repo.local=/m2 -f /build spring-boot:run -> -> ``` - -For more details about the `--mount` option, check the [documentation](/howto/volumes.html) - -### Python Kubernetes Client - -If you are using a Python Kubernetes client like [the officially supported one](https://github.com/kubernetes-client/python/), you need to make sure the client can access service account information. This can be done with the `--mount` option introduced in `Telepresence 0.85`. - -We need to add the following to the command: - -* `--mount /tmp/known` Tells `Telepresence` to mount `TELEPRESENCE_ROOT` to a known folder -* `-v=/tmp/known/var/run/secrets:/var/run/secrets` This is another Docker mounting command to mount the known folder to `/var/run/secrets` in the local container. The [Kubernetes Python client](https://github.com/kubernetes-client/python) can find the secrets there as it would inside Kubernetes. - -> telepresence.sh -> ```bash -> telepresence --mount /tmp/known --swap-deployment myapp --docker-run --rm -v$(pwd):/code -v=/tmp/known/var/run/secrets:/var/run/secrets -p 8080:8080 guray/podstatus:1.0 -> -> ``` - -The example is an API which returns the list of pods in the desired namespace (*if the serviceaccount is authorized*). To try it from your laptop: `curl localhost:8080/pods/default`. - -#### How does it work? - -The container runs on your laptop and gets the serviceaccount information as if it were running on the Kubernetes cluster. Afterwards, if authorized, it gets the list of pods and returns their status as JSON. - -For more details about the `--mount` option, check the [documentation](/howto/volumes.html) diff --git a/docs/v1/tutorials/kubernetes-client.md b/docs/v1/tutorials/kubernetes-client.md deleted file mode 100644 index 1b5ff7c7..00000000 --- a/docs/v1/tutorials/kubernetes-client.md +++ /dev/null @@ -1,52 +0,0 @@ -import * as Macros from "../macros"; - -# Connect to a remote Kubernetes cluster - - - -### Connecting to a remote cluster - -In this tutorial you'll see how Telepresence allows you to get transparent access to a remote cluster from a local process. -This allows you to use your local tools on your laptop to communicate with processes inside the cluster.
- -You should start by running a service in the cluster: - -```console -$ kubectl create deployment myservice --image=datawire/hello-world -$ kubectl expose deployment myservice --port=8000 -$ kubectl get service myservice -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -myservice 10.0.0.12 8000/TCP 1m -``` - -It may take a minute or two for the pod running the server to be up and running, depending on how fast your cluster is. - -You can now run a local process using Telepresence that can access that service, even though the process is local but the service is running in the Kubernetes cluster: - -```console -$ telepresence --run curl http://myservice:8000/ -Hello, world! -``` - -(This will not work if the hello world pod hasn't started yet... if so, try again.) - -What's going on: - -1. Telepresence creates a new `Deployment`, which runs a proxy. -2. Telepresence runs `curl` locally in a way that proxies networking through that `Deployment`. -3. The DNS lookup and HTTP request done by `curl` get routed through the proxy and transparently access the cluster... even though `curl` is running locally. -4. When `curl` exits the new `Deployment` will be cleaned up. - -To learn more about what Telepresence proxies you can read the relevant [reference documentation](/reference/proxying.html). - - - - diff --git a/docs/v1/tutorials/kubernetes-rapid.md b/docs/v1/tutorials/kubernetes-rapid.md deleted file mode 100644 index 25ef413a..00000000 --- a/docs/v1/tutorials/kubernetes-rapid.md +++ /dev/null @@ -1,86 +0,0 @@ -import * as Macros from "../macros"; - -# Rapid development with Kubernetes - - - -### Rapid development with Telepresence - -Imagine you're developing a new Kubernetes service. -Typically the way you'd test is by changing the code, rebuilding the image, pushing the image to a Docker registry, and then redeploying the Kubernetes `Deployment`. -This can be slow. - -Or, you can use Telepresence. -Telepresence will proxy a remote `Deployment` to a process running on your machine. -That means you can develop locally, editing code as you go, but test your service inside the Kubernetes cluster. - -Let's say you're working on the following minimal server, `helloworld.py`: - -```python -#!/usr/bin/env python3 - -from http.server import BaseHTTPRequestHandler, HTTPServer - -class RequestHandler(BaseHTTPRequestHandler): - def do_GET(self): - self.send_response(200) - self.send_header('Content-type', 'text/plain') - self.end_headers() - self.wfile.write(b"Hello, world!\n") - return - -httpd = HTTPServer(('', 8080), RequestHandler) -httpd.serve_forever() -``` - -You start a proxy inside your Kubernetes cluster that will forward requests from the cluster to your local process, and in the resulting shell you start the web server: - -``` -localhost$ telepresence --new-deployment hello-world --expose 8080 -localhost$ python3 helloworld.py -``` - -This will create a new `Deployment` and `Service` named `hello-world`, which will listen on port 8080 and forward traffic to the process on your machine on port 8080. - -You can see this if you start a container inside the Kubernetes cluster and connect to that `Service`. -In a new terminal run: - -```console -localhost$ kubectl --restart=Never run -i -t --image=alpine console /bin/sh -kubernetes# wget -O - -q http://hello-world:8080/ -Hello, world! -``` - -Now, switch back to the other terminal, kill `helloworld.py` and edit it so it returns a different string. 
-For example: - -```console -localhost$ python3 helloworld.py -^C -localhost$ sed s/Hello/Goodbye/g -i helloworld.py -localhost$ grep Goodbye helloworld.py - self.wfile.write(b"Goodbye, world!\n") -localhost$ python3 helloworld.py -``` - -Now that we've restarted our local process with new code, we can send it another query from the other terminal where we have a shell running inside a Kubernetes pod: - -```console -kubernetes# wget -O - -q http://hello-world:8080/ -Goodbye, world! -kubernetes# exit -``` - -And there you have it: you edit your code locally, and changes are reflected immediately to clients inside the Kubernetes cluster without having to redeploy, create Docker images, and so on. - - - - diff --git a/docs/v1/tutorials/kubernetes.md b/docs/v1/tutorials/kubernetes.md deleted file mode 100644 index 42762d61..00000000 --- a/docs/v1/tutorials/kubernetes.md +++ /dev/null @@ -1,55 +0,0 @@ -import * as Macros from "../macros"; - -# Debug a Kubernetes service locally - - - - - -You should start a `Deployment` and publicly exposed `Service` like this: - -```console -$ kubectl create deployment hello-world --image=datawire/hello-world -$ kubectl expose deployment hello-world --type=LoadBalancer --port=8000 -``` - -> **If your cluster is in the cloud** you can find the address of the resulting `Service` like this: -> -> ```console -> $ kubectl get service hello-world -> NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -> hello-world 10.3.242.226 104.197.103.123 8000:30022/TCP 5d -> ``` - -> If you see `` under EXTERNAL-IP wait a few seconds and try again. -> In this case the `Service` is exposed at `http://104.197.103.123:8000/`. - -> **On `minikube` you should instead** do this to find the URL: -> -> ```console -> $ minikube service --url hello-world -> http://192.168.99.100:12345/ -> ``` - - - -```console -$ kubectl delete deployment,service hello-world -``` - -Telepresence can do much more than this: see the reference section of the documentation, on the top-left, for details. - - - - diff --git a/docs/v1/tutorials/minikube-vpn.md b/docs/v1/tutorials/minikube-vpn.md deleted file mode 100644 index 071aad87..00000000 --- a/docs/v1/tutorials/minikube-vpn.md +++ /dev/null @@ -1,54 +0,0 @@ -import * as Macros from "../macros"; - -# Minikube VPN access - - - -### Transparently connecting to Minikube - -In this tutorial you'll see how Telepresence allows you to get transparent access to Minikube networking from a local process outside of Minikube. -This allows you to use your local tools on your laptop to communicate with processes inside Minikube. - -You should start by running a service in the cluster: - -```console -$ kubectl config use-context minikube -$ kubectl run myservice --image=datawire/hello-world --port=8000 --expose -$ kubectl get service myservice -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -myservice 10.0.0.12 8000/TCP 1m -``` - -It may take a minute or two for the pod running the server to be up and running, depending on how fast your cluster is. - -You can now run a local shell using Telepresence that can access that service, even though the process is local but the service is running inside Minikube: - -```console -$ telepresence --run-shell -@minikube|$ curl http://myservice:8000/ -Hello, world! -``` - -You also have access to the same environment variables a pod in the minikube cluster would have: - -```console -@minikube|$ env | grep MYSERVICE_ -MYSERVICE_SERVICE_HOST=10.0.0.12 -MYSERVICE_SERVICE_PORT=8000 -``` - -(This will not work if the hello world pod hasn't started yet... 
if so, try again.) - -Telepresence will also allow services within minikube to [access a process running on your host machine](../kubernetes-rapid). - - - - diff --git a/docs/v1/tutorials/openshift.md b/docs/v1/tutorials/openshift.md deleted file mode 100644 index bf9802c1..00000000 --- a/docs/v1/tutorials/openshift.md +++ /dev/null @@ -1,92 +0,0 @@ -import * as Macros from "../macros"; - -# Get started with OpenShift - -## A short introduction: accessing the cluster - -1. Install Telepresence (see below). -2. Run a service in the cluster: - - ```console - $ oc run myservice --image=datawire/hello-world --port=8000 --expose - $ oc get service myservice - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - myservice 10.0.0.12 8000/TCP 1m - ``` - - It may take a minute or two for the pod running the server to be up and running, depending on how fast your cluster is. - -3. You can now run a local process using Telepresence that can access that service, even though the process is local and the service is running in the OpenShift cluster: - - ```console - $ telepresence --run curl http://myservice:8000/ - Hello, world! - ``` - - (This will not work if the hello world pod hasn't started yet... if so, try again.) - -`curl` got access to the cluster even though it's running locally! -In the more extended tutorial that follows you'll see how you can also route traffic *to* a local process from the cluster. - -## A longer introduction: exposing a service to the cluster - - - - - -You should start a new application and publicly expose it: - -```console -$ oc new-app --docker-image=datawire/hello-world --name=hello-world -$ oc expose service hello-world -``` - -**Important:** It might be necessary to define a security context constraint in order for Telepresence to run privileged -containers. Execute the following using an account with sufficient permissions (i.e. for a local cluster, log in first as `system:admin`): - -```console -$ oc adm policy add-scc-to-user anyuid -z default -n hello-world -$ oc adm policy add-scc-to-user privileged -z default -n hello-world -``` - -The service will be running once the following shows a pod with `Running` status that *doesn't* have "deploy" in its name: - -```console -$ oc get pod | grep hello-world -hello-world-1-hljbs 1/1 Running 0 3m -``` - -To find the address of the resulting app you can run: - -```console -$ oc get route hello-world -NAME HOST/PORT -hello-world example.openshiftapps.com -``` - -In the above output the address is `http://example.openshiftapps.com`, but you will get a different value. -It may take a few minutes before this route is live; in the interim you will get an error page. -If you do, wait a minute and try again. - - - -```console -$ oc delete dc,service,route,imagestream hello-world -``` - -Telepresence can do much more than this: see the reference section of the documentation, on the top-left, for details.
 - - - - diff --git a/docs/v1/tutorials/php.md b/docs/v1/tutorials/php.md deleted file mode 100644 index 7af83eab..00000000 --- a/docs/v1/tutorials/php.md +++ /dev/null @@ -1,137 +0,0 @@ -import * as Macros from "../macros"; - -# Local development with PHP -*Author: Solomon Roberts ([@BadgerOps](https://twitter.com/BadgerOps))* - - - -##### Note: this is heavily influenced by the awesome [Local Java development](../java) doc by Cesar Tron-Lozai ([@CesarTronLozai](https://twitter.com/cesarTronLozai)) - -## PHP - -`Telepresence` can help you speed up your development process for any technology, as long as you deploy your service as a Docker image into a Kubernetes container. - -In this tutorial we will focus on how to set up a local development environment for a (micro)-service `Bar` written in PHP. - -This is very useful if your application is formed of many such services which cannot run on a single development machine, in which case it's easy to set up a separate Kubernetes cluster dedicated to development. - -`Telepresence` will help us locally develop our service `Bar` as if it was still inside the Kubernetes cluster. It's a win-win!! - -## Architecture - -The idea is quite simple: `Telepresence` will start a Docker container on your local machine, remove the running pod for `Bar` and replace it with a two-way proxy to your local docker container. - -If other services in your cluster want to talk to `Bar`, they'll get redirected to your local process. If your local process wants to talk to any other services running in your cluster, `Telepresence` will redirect the calls to your cluster. -It will also maintain all the environment variables defined in your deployment. It's magical. - -In order to run our PHP application in a local Docker container, we can simply start a container which has PHP and Apache installed, mount the source directory containing our code, and start coding! - -In this tutorial we will be using PHP 7.2 and an Apache-based container. - -#### PHP-FPM NOTE: -> This could also work with PHP-FPM + Nginx; you'd just need to adjust the default xdebug port of 9000 because that will conflict with the PHP-FPM listener port. I haven't had time to test PHP-FPM + Nginx, and will update this tutorial when I'm able to. - -## Building inside a docker container - -As mentioned above, the goal is to compile and run our code inside a Docker container which `Telepresence` can use to replace the pod running in your cluster. - -Let's build the command step by step. - -* `telepresence` Runs `Telepresence`! -* `--container-to-host 9000` Forwards port 9000 inside the container back to your computer's localhost on port 9000. This allows us to use xdebug for debugging and stepping through code. -* `--new-deployment bar` Creates a new deployment called `bar` - you could also use `--swap-deployment bar` if you want to test against an existing configured cluster. -* `--docker-run` Tells `Telepresence` to run a Docker container -* `--rm` Tells Docker to discard our image when it terminates (no need to clutter your computer) -* `-v$(pwd):/var/www/html` Mounts the current directory (result of the `pwd` command) into a `/var/www/html` folder inside the Docker container. This is where your source code will be, mounted in the container where Apache is configured to look for php/html files to serve. - * you could also specify the fully qualified path to your code repo if you don't want to execute this command in your code directory.
-* `-p 8080:80` Forwards Apache on `80` to http://localhost:8080 so you can hit your web service with your browser on your local computer. -* `myapp:01` The container image that you want to run with the Telepresence command. - -And that's it! You can easily create a `telepresence.sh` file in the root of your project with the following: - -> telepresence.sh -> ```bash -> telepresence --container-to-host 9000 --new-deployment bar --docker-run --rm -v$(pwd):/var/www/html -p 8080:80 myapp:01 ->``` - - -## Example of how to test Telepresence + PHP - -The key piece here is `--container-to-host 9000`. (Note: if you're using php-fpm, you'll want to forward a different port for xdebug, since php-fpm uses 9000.) - -This creates a reverse connection from the container that your code is executing in back to the host machine so your xdebug listener can receive the connection. - -Create an `index.php` along the following lines (the markup below is a reconstructed example): - -```php -<html> - <head> -  <title>PHP Telepresence Demo</title> - </head> - <body> -  <p>Hello World!</p> -  <?php echo '<p>Hello from PHP!</p>'; ?> - </body> -</html> -``` - -Next, create a Dockerfile with the following contents: - -```dockerfile -FROM php:7.2-apache -RUN pecl install xdebug-2.6.0 -RUN docker-php-ext-enable xdebug -RUN echo "xdebug.remote_enable=1" >> /usr/local/etc/php/php.ini && \ - echo "xdebug.remote_host=localhost" >> /usr/local/etc/php/php.ini && \ - echo "xdebug.remote_port=9000" >> /usr/local/etc/php/php.ini && \ - echo "xdebug.remote_log=/var/log/xdebug.log" >> /usr/local/etc/php/php.ini - - -COPY ./index.php /var/www/html -WORKDIR /var/www/html -``` -This takes the base `php:7.2-apache` Docker image and inserts our xdebug configuration so xdebug will run when we execute our `myapp` container. - -Next, you'll build the `myapp` container: - -```bash -docker build -t myapp:01 . -``` - -Finally, execute the Telepresence command we built earlier: - -```bash -telepresence --container-to-host 9000 --new-deployment bar --docker-run --rm -v$(pwd):/var/www/html -p 8080:80 myapp:01 -``` - -And here is what the example output should look like (using Telepresence build 0.102 in October 2019): - -``` -telepresence --container-to-host 9000 --verbose --new-deployment tele-test --docker-run -p 8080:80 -v $(pwd):/var/www/html myapp:01 -T: How Telepresence uses sudo: https://www.telepresence.io/reference/install#dependencies -T: Invoking sudo. Please enter your sudo password. -Password: -T: Volumes are rooted at $TELEPRESENCE_ROOT. See https://telepresence.io/howto/volumes.html for details. -T: Starting network proxy to cluster using new Deployment tele-test - -T: No traffic is being forwarded from the remote Deployment to your local machine. You can use the --expose option to specify which ports you want to forward. - -T: Forwarding container port 9000 to host port 9000. -T: Setup complete. Launching your container. -AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 172.17.0.2. Set the 'ServerName' directive globally to suppress this message -AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 172.17.0.2. Set the 'ServerName' directive globally to suppress this message -[Thu Oct 03 17:04:35.421678 2019] [mpm_prefork:notice] [pid 7] AH00163: Apache/2.4.38 (Debian) PHP/7.2.23 configured -- resuming normal operations -[Thu Oct 03 17:04:35.422032 2019] [core:notice] [pid 7] AH00094: Command line: 'apache2 -D FOREGROUND' -``` - -We can see that the container port 9000 is forwarded to our host port 9000, and Apache launches. - -I use PhpStorm for PHP development, so I used the [PhpStorm xdebug guide](https://www.jetbrains.com/help/phpstorm/configuring-xdebug.html) to configure my browser (with the xdebug extension) and debugger in PhpStorm. We've also set up IntelliJ IDEA to debug with the same steps. - -You should then be able to turn on your debug listener in your IDE, set a breakpoint, and navigate to `http://localhost:8080` in your browser to load the code and hit your breakpoint! (If the breakpoint never triggers, see the sketch at the end of this page for a quick way to check the port forward.) - -This tutorial is adapted from a very basic example in [this blog post](https://blog.badgerops.net/debugging-a-php-app-in-kubernetes-using-telepresence-io/) - if you have any problems or questions, feel free to join the Telepresence Slack or reach out to [@BadgerOps](https://twitter.com/BadgerOps) on Twitter.
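If the breakpoint never triggers, one way to sanity-check the `--container-to-host 9000` forward itself is to listen on your laptop in place of the IDE and poke the port from inside the running container (a hedged sketch; the container ID is a placeholder for whatever `docker ps` shows):

```console
$ # On your laptop, in place of the xdebug listener
$ # (some netcat variants need `nc -l -p 9000` instead):
$ nc -l 9000
$ # In another terminal, from inside the running container:
$ docker exec -it <container-id> bash -c 'echo ping > /dev/tcp/localhost/9000'
```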
diff --git a/docs/v1/versions.yml b/docs/v1/versions.yml deleted file mode 100644 index 4e8e5b7f..00000000 --- a/docs/v1/versions.yml +++ /dev/null @@ -1 +0,0 @@ -version: "0.109" diff --git a/docs/v2.0/concepts/context-prop.md b/docs/v2.0/concepts/context-prop.md deleted file mode 100644 index 86cbe295..00000000 --- a/docs/v2.0/concepts/context-prop.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -description: "Telepresence uses context propagation to intelligently route requests, transferring request metadata across the components of a distributed system." ---- - -# Context Propagation - -Telepresence uses *context propagation* to intelligently route requests to the appropriate destination. Context propagation is transferring request metadata across the services and remote processes of a distributed system. - -This metadata is the *context* that is transferred across the system services. It commonly takes the form of HTTP headers, such that context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -The metadata *propagation* refers to any service or other middleware not stripping away the headers. Propagation facilitates the movement of the injected contexts between other downstream services and processes. - -A common application for context propagation is *distributed tracing*. This is a technique for troubleshooting and profiling distributed microservices applications. In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/), which facilitates maintaining the headers though every service without being stripped (the propagation). - -Similarly, Telepresence also uses custom headers and header propagation. Our use case however is controllable intercepts and preview URLs instead of tracing. The headers facilitate the smart routing of requests either to live services in the cluster or services running on a developer’s machine. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to Ambassador Cloud with other info about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. - - - - - diff --git a/docs/v2.0/concepts/devloop.md b/docs/v2.0/concepts/devloop.md deleted file mode 100644 index 292ed13a..00000000 --- a/docs/v2.0/concepts/devloop.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -description: "Inner and outer dev loops describe the processes developers repeat to iterate on code. As these loops get more complex, productivity decreases." 
---- - -# Inner and Outer Dev Loops - -Cloud native technologies also fundamentally altered the developer experience. Not only are engineers now expected to design and build distributed service-based applications, but their entire development loop has been disrupted. No longer can developers rely on monolithic application development best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner developer loop. They now have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this has a large impact on development time. - -If a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes -- 3 coding, 1 building i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code -- they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. - -![Traditional inner dev loop](../../images/trad-inner-dev-loop.png) - -If the build time is incremented to 5 minutes -- not atypical with a standard container build, registry upload, and deploy -- then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. - -![Container inner dev loop](../../images/container-inner-dev-loop.png) - -Many development teams began using custom proxies to either automatically and continually sync their local development code base with a remote surrogate (enabling “live reload” in a remote cluster), or route all remote service traffic to their local services for testing. The former approach had limited value for compiled languages, and the latter often did not support collaboration within teams where multiple users want to work on the same services. - -In addition to the challenges with the inner development loop, the changing outer development loop also caused issues. Over the past 20 years, end users and customers have become more demanding, but also less sure of their requirements. Pioneered by disruptive organizations like Netflix, Spotify, and Google, this has resulted in software delivery teams needing to be capable of rapidly delivering experiments into production. Unit, integration, and component testing is still vitally important, but modern application platforms must also support the incremental release of functionality and applications to end users in order to allow testing in production. - -The traditional outer development loop for software engineers of code merge, code review, build artifact, test execution, and deploy has now evolved. A typical modern outer loop now consists of code merge, automated code review, build artifact and container, test execution, deployment, controlled (canary) release, and observation of results. If a developer doesn’t have access to self-service configuration of the release then the time taken for this outer loop increases by at least an order of magnitude e.g. 1 minute to deploy an updated canary release routing configuration versus 10 minutes to raise a ticket for a route to be modified via the platform team. 
diff --git a/docs/v2.0/doc-links.yml b/docs/v2.0/doc-links.yml deleted file mode 100644 index bea54777..00000000 --- a/docs/v2.0/doc-links.yml +++ /dev/null @@ -1,36 +0,0 @@ - - title: Quick Start - link: quick-start - - title: How-to Guides - items: - - title: Intercept a Service - link: howtos/intercepts - - title: Collaborating with Preview URLs - link: howtos/preview-urls - - title: Create a Demo Cluster - link: howtos/democluster - - title: Outbound Sessions - link: howtos/outbound - - title: Upgrading and Previous Versions - link: howtos/upgrading - - title: Core Concepts - items: - - title: Inner and Outer Dev Loop - link: concepts/devloop - - title: Context Propagation - link: concepts/context-prop - - title: Technical Reference - items: - - title: Architecture - link: reference/architecture - - title: Client Reference - link: reference/client - - title: Environment Variables - link: reference/environment - - title: Volume Mounts - link: reference/volume - - title: DNS Resolution - link: reference/dns - - title: FAQs - link: faqs - - title: Troubleshooting - link: troubleshooting diff --git a/docs/v2.0/faqs.md b/docs/v2.0/faqs.md deleted file mode 100644 index 99058963..00000000 --- a/docs/v2.0/faqs.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes deployment, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access with additional developers or stakeholders to the application via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What protocols can be intercepted by Telepresence?** - -All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](../../../../feedback) to request it. 
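As a rough sketch of the connect-and-intercept workflow this FAQ describes (the commands are the ones documented elsewhere on this site; the service name and port are placeholders):

```shell
# Connect your laptop to the cluster (the daemon will prompt for sudo).
telepresence connect

# See which deployments Telepresence can intercept.
telepresence list

# Intercept a placeholder service, proxying matching requests to port 8080 locally.
telepresence intercept example-service --port 8080
```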
- -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, please see [this document](../reference/environment/) for more information. - -** When using Telepresence to intercept a pod, are the associated pod volume mounts also proxied and shared with my local machine?** - -Yes, please see [this doc on using volume mounts](../reference/volume/). - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. - -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. - -Telepresence will discover/prompt during first use for this info and make its best guess at figuring this out and ask you to confirm or update this. - -** Will Telepresence be able to intercept deployments running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the Traffic Manager and Traffic Agent containers that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptable mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single Traffic Manager service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a deployment is intercepted all pods associated with this deployment will be restarted with the Traffic Agent automatically injected. 
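A hedged way to see the components described in the answer above: the `ambassador` namespace is the default mentioned in this FAQ, while the literal `traffic-agent` container name and the pod name are assumptions/placeholders for illustration.

```shell
# The Traffic Manager runs in the ambassador namespace.
kubectl get deploy,svc traffic-manager -n ambassador

# An intercepted pod gains an extra container (typically named traffic-agent);
# list the containers of a pod you have intercepted to confirm it was injected.
kubectl get pod <intercepted-pod-name> -o jsonpath='{.spec.containers[*].name}'
```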
- -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the Traffic Manager service installed in the cluster and Traffic Agent containers injected into each pod being intercepted. - -Running this command will also stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established via the standard `kubectl` mechanisms and SSH tunnelling. - -** What identity providers are supported for authenticating to view a preview URL?** - -Currently GitHub is used to authenticate a user of Telepresence (triggered via the `telepresence login` command) and any viewers of a preview URL. - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](../../../../feedback) which providers are the most important to you and your team in order for us to prioritize those. - -** Is Telepresence open source?** - -Telepresence will be open source soon, in the meantime it is free to download. We prioritized releasing the binary as soon as possible for community feedback, but are actively working on the open sourcing logistics. - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](../../../../feedback), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts. diff --git a/docs/v2.0/howtos/democluster.md b/docs/v2.0/howtos/democluster.md deleted file mode 100644 index 7e56aa42..00000000 --- a/docs/v2.0/howtos/democluster.md +++ /dev/null @@ -1,17 +0,0 @@ -# Create a Demo Cluster - -Ambassador has free demo Kubernetes clusters for you to use to test out Telepresence. - -The cluster creation process will provide you with a `config` file to use with `kubectl`. If you need to install `kubectl`, please see [the Kubernetes documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -After creation, the cluster will remain available for three hours, plenty of time for you to finish one of our Telepresence [quick start guides](../../quick-start/). - -## Creating a Cluster - -1. Login to [Ambassador Cloud](http://app.getambassador.io/cloud/) using your GitHub account. - -1. Click the option for **Use Our Demo Cluster**. - -1. Click **Generate Demo Cluster** in step 1 and follow the instructions to configure your `kubectl`. - -1. Begin the [quick start guide](../../quick-start/qs-node/). diff --git a/docs/v2.0/howtos/intercepts.md b/docs/v2.0/howtos/intercepts.md deleted file mode 100644 index 2556854d..00000000 --- a/docs/v2.0/howtos/intercepts.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -description: "Telepresence help you develop Kubernetes services locally without running dependent services or redeploying code updates to your cluster on every change." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; - -# Intercept a Service - -Intercepts enable you to test and debug services locally without needing to run dependent services or redeploy code updates to your cluster on every change. 
A typical workflow would be to run the service you wish to develop on locally, then start an intercept. Changes to the local code can then be tested immediately along side other services running in the cluster. - -When starting an intercept, Telepresence will create a preview URLs. When visiting the preview URL, your request is proxied to your ingress with a special header set. When the traffic within the cluster requests the service you are intercepting, the [Traffic Manager](../../reference/architecture) will proxy that traffic to your laptop. Other traffic entering your ingress will use the service running in the cluster as normal. - -Preview URLs are all managed through Ambassador Cloud. You must run `telepresence login` to access Ambassador Cloud and access the preview URL dashboard. From the dashboard you can see all your active intercepts, delete active intercepts, and change them between private and public for collaboration. Private preview URLs can be accessed by anyone else in the GitHub organization you select when logging in. Public URLs can be accessed by anyone who has the link. - -While preview URLs selectively proxy traffic to your laptop, you can also run an [intercept without creating a preview URL](#creating-an-intercept-without-a-preview-url), which will proxy all traffic to the service. - -For a detailed walk though on creating intercepts using our sample app, follow the quick start guide. - -## Creating an Intercept - -The following quick overview on creating an intercept assumes you have a deployment and service accessible publicly by an ingress controller and that you can run a copy of that service on your laptop. - -1. Install Telepresence if needed. - - - - - ```shell - # 1. Download the latest binary (~60 MB): - sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - - # 2. Make the binary executable: - sudo chmod a+x /usr/local/bin/telepresence - ``` - - - - - ```shell - # 1. Download the latest binary (~50 MB): - sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - - # 2. Make the binary executable: - sudo chmod a+x /usr/local/bin/telepresence - ``` - - - - -1. In your terminal run `telepresence login`. This logs you into the Ambassador Cloud, which will track your intercepts and let you share them with colleagues. - - If you are logged in and close the dashboard browser tab, you can quickly reopen it by running telepresence dashboard. - -2. Return to your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../../reference/architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Start the service on your laptop and make a change to the code that will be apparent in the browser when the service runs, such as a text or other UI change. - -4. In a new terminal window start the intercept. This will proxy requests to the cluster service to your laptop. It will also generate a preview URL, which will let you access your service from the ingress but with requests to the intercepted service proxied to your laptop. - - The intercept requires you specify the name of the deployment to be - intercepted and the port on your laptop to proxy to. 
- - ``` - telepresence intercept ${base_name_of_intercept} --port=${local_TCP_port} - ``` - - The name of the Deployment to be intercepted will default to the - base name of the intercept that you give, but you can specify a - different deployment name using the `--deployment` flag: - - ``` - telepresence intercept ${base_name_of_intercept} --deployment=${name_of_deployment} --port=${local_TCP_port} - ``` - - Because you're logged in (from `telepresence login` in step 2), it - will default to `--preview-url=true`, which will use Ambassador - Cloud to create a sharable preview URL for this intercept; if you - hadn't been logged in it would have defaulted to - `--preview-url=false`. In order to do this, it will prompt you for - three options. For the first, `Ingress`, Telepresence tries to - intelligently determine the ingress controller deployment and - namespace for you. If they are correct, you can hit `enter` to - accept the defaults. Set the next two options, `TLS` and `Port`, - appropriately based on your ingress service. - - Also because you're logged in, it will default to `--mechanism=http - --http-match=auto` (or just `--http-match=auto`; `--http-match` - implies `--mechanism=http`); if you hadn't been logged in it would - have defaulted to `--mechanism=tcp`. This tells it to do smart - intercepts and only intercept a subset of HTTP requests, rather - than just intercepting the entirety of all TCP connections. This - is important for working in a shared cluster with teammates, and is - important for the preview URL functionality. See `telepresence - intercept --help` for information on using `--http-match` to - customize which requests it intercepts. - -5. Open the preview URL in your browser. The page that loads will proxy requests to the intercepted service to your laptop. You will also see a banner at the bottom on the page informing that you are viewing a preview URL with your name and org name. - -6. Switch back in your browser to the Ambassador Cloud dashboard page and refresh it to see your preview URL listed. Click the box to expand out options where you can disable authentication or remove the preview. - -7. Stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave ${full_name_of_intercept} - telepresence quit - telepresence uninstall --everything - ``` - - The resulting intercept might have a full name that is different - than the base name that you gave to `telepresence intercept` in - step 4; see the section [Specifing a namespace for an - intercept](#specifying-a-namespace-for-an-intercept) for more - information. - -## Specifying a namespace for an intercept - -The namespace of the intercepted deployment is specified using the `--namespace` option. When this option is used, and `--deployment` is not used, then the given name is interpreted as the name of the deployment and the name of the intercept will be constructed from that name and the namespace. - - ``` - telepresence intercept hello --namespace myns --port 9000 - ``` - -This will intercept a Deployment named "hello" and name the intercept -"hello-myns". In order to remove the intercept, you will need to run -`telepresence leave hello-mydns` instead of just `telepresence leave -hello`. - -The name of the intercept will be left unchanged if the deployment is specified. 
- - ``` - telepresence intercept myhello --namespace myns --deployment hello --port 9000 - ``` -This will intercept a deployment named "hello" and name the intercept "myhello". - -## Importing Environment Variables - -Telepresence can import the environment variables from the pod that is being intercepted, see [this doc](../../reference/environment/) for more details. - -## Creating an Intercept Without a Preview URL - -If you *are not* logged into Ambassador Cloud, the following command will intercept all traffic bound to the service and proxy it to your laptop. This includes traffic coming through your ingress controller, so use this option carefully as to not disrupt production environments. - -``` -telepresence intercept ${base_name_of_intercept} --port=${local_TCP_port} -``` - -If you *are* logged into Ambassador Cloud, setting the `preview-url` flag to `false` is necessary. - -``` -telepresence intercept ${base_name_of_intercept} --port=${local_TCP_port} --preview-url=false -``` - -This will output a header that you can set on your request for that traffic to be intercepted: - -``` -$ telepresence intercept --port= --preview-url=false -Using deployment -intercepted - Intercept name: - State : ACTIVE - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -``` -$ telepresence status -Connected - Context: default (https://) - Proxy: ON (networking to the cluster is enabled) - Intercepts: 1 total - dataprocessingnodeservice: -``` - -Finally, run `telepresence leave [name of intercept]` to stop the intercept. diff --git a/docs/v2.0/howtos/outbound.md b/docs/v2.0/howtos/outbound.md deleted file mode 100644 index 3c227705..00000000 --- a/docs/v2.0/howtos/outbound.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -# Outbound Sessions - -While preview URLs are a powerful feature, there are other options to use Telepresence for proxying traffic between your laptop and the cluster. - -## Prerequistes - -It is assumed that you have the demo web app from the [tutorial](../../tutorial/) running in your cluster, but deployment names used below can be substituted for any other running deployment. - -## Proxying Outbound Traffic - -Connecting to the cluster instead of running an intercept will allow you to access cluster deployments as if your laptop was another pod in the cluster. You will be able to access other Kubernetes services using `.`, for example by curling a service from your terminal. A service running on your laptop will also be able to interact with other services on the cluster by name. - -Connecting to the cluster starts the background daemon on your machine and installs the [Traffic Manager pod](../../reference/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect`, you will be prompted for your password to run the daemon. - - ``` - $ telepresence connect - Launching Telepresence Daemon v2.0.0 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground" - Password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -1. Run `telepresence status` to confirm that you are connected to your cluster and are proxying traffic to it. 
- - ``` - $ telepresence status - Connected - Context: default (https://) - Proxy: ON (networking to the cluster is enabled) - Intercepts: 0 total - ``` - -1. Now try to access your service by name with `curl verylargejavaservice.default:8080`. Telepresence will route the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl verylargejavaservice.default:8080 - - - - Welcome to the EdgyCorp WebApp - ... - ``` - -3. Terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer being proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -## Controlling Outbound Connectivity - -By default, Telepresence will provide access to all Services found in all namespaces in the connected cluster. This might lead to problems if the user does not have access permissions to all namespaces via RBAC. The `--mapped-namespaces ` flag was added to give the user control over exactly which namespaces will be accessible. - -When using this option, it is important to include all namespaces containing services to be accessed and also all namespaces that contain services that those intercepted services might use. - -### Using local-only intercepts - -An intercept with the flag`--local-only` can be used to control outbound connectivity to specific namespaces. - -When developing services that have not yet been deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service is intended to be deployed so that it can access other services in that namespace without using qualified names. - - ``` - $ telepresence intercept [name of intercept] --namespace [name of namespace] --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. The intercept is deactivated just like any other intercept. - - ``` - $ telepresence leave [name of intercept] - ``` -The unqualified name access is now removed provided that no other intercept is active and using the same namespace. diff --git a/docs/v2.0/howtos/preview-urls.md b/docs/v2.0/howtos/preview-urls.md deleted file mode 100644 index 85d34b03..00000000 --- a/docs/v2.0/howtos/preview-urls.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -# Collaboration with Preview URLs - -For collaborating on development work, Telepresence generates preview URLs that you can share with your teammate or collaborators outside of our organization. This opens up new possibilities for real time development, debugging, and pair programming among increasingly distributed teams. - -Preview URLs are protected behind authentication via Ambassador Cloud, ensuring that only users in your organization can view them. A preview URL can also be set to allow public access, for sharing with outside collaborators. - -## Prerequisites - -You must have an active intercept running to your cluster with the intercepted service running on your laptop. - -Sharing a preview URL with a teammate requires you both be members of the same GitHub organization. - -> More methods of authentication will be available in future Telepresence releases, allowing for collaboration via other service organizations. 
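A small, hedged check of the prerequisite above before sending a link to anyone -- both commands appear elsewhere in these docs:

```shell
# Confirm the daemon is connected and at least one intercept is active.
telepresence status

# List deployments and see which of them is currently intercepted.
telepresence list
```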
-
-## Sharing a Preview URL (With Teammates)
-
-You can collaborate with teammates by sending your preview URL to them via Slack or however you communicate. They will be asked to authenticate via GitHub if they are not already logged into Ambassador Cloud. When they visit the preview URL, they will see the intercepted service running on your laptop. Your laptop must be online and running the service for them to see the live intercept.
-
-## Sharing a Preview URL (With Outside Collaborators)
-
-To collaborate with someone outside of your GitHub organization, you must go to the Ambassador Cloud dashboard (run `telepresence dashboard` to reopen it), select the preview URL, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. Your laptop must be online and running the service for them to see the live intercept.
-
-To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the intercept, either from the dashboard or by running `telepresence leave <name of intercept>`, also removes all access to the preview URL.
diff --git a/docs/v2.0/howtos/upgrading.md b/docs/v2.0/howtos/upgrading.md
deleted file mode 100644
index 527efa3a..00000000
--- a/docs/v2.0/howtos/upgrading.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-description: "How to upgrade your installation of Telepresence and install previous versions."
----
-
-# Upgrading Telepresence
-
-The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. [Running the same commands used for installation](../../quick-start/) will replace your current binary with the latest version.
-
-### macOS
-
-```
-sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \
--o /usr/local/bin/telepresence && \
-sudo chmod a+x /usr/local/bin/telepresence && \
-telepresence version
-```
-
-### Linux
-
-```
-sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \
--o /usr/local/bin/telepresence && \
-sudo chmod a+x /usr/local/bin/telepresence && \
-telepresence version
-```
-
-### Upgrading from Telepresence version 2.0.1 or older
-
-The traffic-manager must be uninstalled manually. This can be done using `telepresence uninstall --everything` _before_ the upgrade or by using `kubectl delete svc,deploy traffic-manager`.
-
-## Installing Older Versions of Telepresence
-
-Use the following URLs to install an older version, replacing `x.x.x` with the version you want.
-
-### macOS
-`https://app.getambassador.io/download/tel2/darwin/amd64/x.x.x/telepresence`
-
-### Linux
-`https://app.getambassador.io/download/tel2/linux/amd64/x.x.x/telepresence`
-
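Combining the URL pattern above with the install commands from earlier on this page gives a hedged sketch for pinning an older release on Linux (substitute a real version for `x.x.x`; the macOS variant swaps `linux` for `darwin`):

```shell
sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/x.x.x/telepresence \
-o /usr/local/bin/telepresence && \
sudo chmod a+x /usr/local/bin/telepresence && \
telepresence version
```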
-
-Use the following URLs to find the current latest version number.
-
-### macOS
-`https://app.getambassador.io/download/tel2/darwin/amd64/stable.txt`
-
-### Linux
-`https://app.getambassador.io/download/tel2/linux/amd64/stable.txt`
diff --git a/docs/v2.0/images/apple.png b/docs/v2.0/images/apple.png
deleted file mode 100644
index 8b8277f1..00000000
Binary files a/docs/v2.0/images/apple.png and /dev/null differ
diff --git a/docs/v2.0/images/github-login.png b/docs/v2.0/images/github-login.png
deleted file mode 100644
index cfd4d4bf..00000000
Binary files a/docs/v2.0/images/github-login.png and /dev/null differ
diff --git a/docs/v2.0/images/linux.png b/docs/v2.0/images/linux.png
deleted file mode 100644
index 1832c594..00000000
Binary files a/docs/v2.0/images/linux.png and /dev/null differ
diff --git a/docs/v2.0/quick-start/TelepresenceQuickStartLanding.js b/docs/v2.0/quick-start/TelepresenceQuickStartLanding.js
deleted file mode 100644
index 537a6325..00000000
--- a/docs/v2.0/quick-start/TelepresenceQuickStartLanding.js
+++ /dev/null
@@ -1,126 +0,0 @@
-import React from 'react';
-
-import Icon from '../../../src/components/Icon';
-
-import './telepresence-quickstart-landing.less';
-
-/** @type React.FC> */
-const RightArrow = (props) => (
-
-
-);
-
-/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */
-const Box = ({ children, color = 'blue', withConnector = false }) => (
-  <>
-    {withConnector && (
- -
- )} -
{children}
- -); - -const TelepresenceQuickStartLanding = () => ( -
-

- Telepresence -

-

- Explore the use cases of Telepresence with a free remote Kubernetes - cluster, or dive right in using your own. -

- -
-
-
-

- Use Our Free Demo Cluster -

-

- See how Telepresence works without having to mess with your - production environments. -

-
- -

6 minutes

-

Integration Testing

-

- See how changes to a single service impact your entire application - without having to run your entire app locally. -

- - GET STARTED{' '} - - -
- -

5 minutes

-

Fast code changes

-

- Make changes to your service locally and see the results instantly, - without waiting for containers to build. -

- - GET STARTED{' '} - - -
-
-
-
-

- Use Your Cluster -

-

- Understand how Telepresence fits in to your Kubernetes development - workflow. -

-
- -

10 minutes

-

Intercept your service in your cluster

-

- Query services only exposed in your cluster's network. Make changes - and see them instantly in your K8s environment. -

- - GET STARTED{' '} - - -
-
-
- -
-

Watch the Demo

-
-
-

- See Telepresence in action in our 3-minute demo - video that you can share with your teammates. -

-
    -
  • Instant feedback loops
  • -
  • Infinite-scale development environments
  • -
  • Access to your favorite local tools
  • -
  • Easy collaborative development with teammates
  • -
-
-
- -
-
-
-
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/v2.0/quick-start/demo-node.md b/docs/v2.0/quick-start/demo-node.md deleted file mode 100644 index 8c936cc7..00000000 --- a/docs/v2.0/quick-start/demo-node.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import QSCards from './qs-cards' - -# Telepresence Quick Start - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Download the demo cluster archive](#1-download-the-demo-cluster-archive) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Check out the sample application](#3-check-out-the-sample-application) -* [4. Run a service on your laptop](#4-run-a-service-on-your-laptop) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -In this guide we'll give you **everything you need in a preconfigured demo cluster:** the Telepresence CLI, a config file for connecting to your demo cluster, and code to run a cluster service locally. - - - Already have a cluster? Switch over to a version of this guide that takes you though the same steps using your own cluster. - - -## 1. Download the demo cluster archive - -1. {window.open('https://app.getambassador.io/cloud/demo-cluster-download-popup', 'ambassador-cloud-demo-cluster', 'menubar=no,location=no,resizable=yes,scrollbars=yes,status=no,width=550,height=750'); e.preventDefault(); }} target="_blank">Sign in to Ambassador Cloud to download your demo cluster archive. The archive contains all the tools and configurations you need to complete this guide. - -2. Extract the archive file, open the `ambassador-demo-cluster` folder, and run the installer script (the commands below might vary based on where your browser saves downloaded files). - - - This step will also install some dependency packages onto your laptop using npm, you can see those packages at ambassador-demo-cluster/edgey-corp-nodejs/DataProcessingService/package.json. - - - ``` - cd ~/Downloads - unzip ambassador-demo-cluster.zip -d ambassador-demo-cluster - cd ambassador-demo-cluster - ./install.sh - ``` - -3. The demo cluster we provided already has a demo app running. List the app's services: - `kubectl get services` - - ``` - $ kubectl get services - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.43.0.1 443/TCP 14h - dataprocessingservice ClusterIP 10.43.159.239 3000/TCP 14h - verylargejavaservice ClusterIP 10.43.223.61 8080/TCP 14h - verylargedatastore ClusterIP 10.43.203.19 8080/TCP 14h - ``` - -4. Confirm that the Telepresence CLI is now installed, we expect to see that the daemons are not yet running: -`telepresence status` - - ``` - $ telepresence status - - Root Daemon: Not running - User Daemon: Not running - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence status command. - - - - You now have Telepresence installed on your workstation and a Kubernetes cluster configured in your terminal. - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster (this requires root privileges and will ask for your password): -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. 
Check out the sample application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - -We'll use a sample app that is already installed in your demo cluster. Let's take a quick look at it's architecture before continuing. - -1. Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -2. Since you’ve already connected Telepresence to your cluster, you can access the frontend service in your browser at http://verylargejavaservice.default:8080. - -3. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Run a service on your laptop - -Now start up the DataProcessingService service on your laptop. This version of the code has the UI color set to blue instead of green. - -1. **In a new terminal window**, go the demo application directory in the extracted archive folder: - `cd edgey-corp-nodejs/DataProcessingService` - -2. Start the application: - `npm start` - - ``` - $ npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - -4. **Back in your previous terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - - Didn't work? Make sure you are working in the terminal window where you ran the script because it sets environment variables to access the demo cluster. Those variables will only will apply to that terminal session. - - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - ... - ``` - -2. Go to the frontend service again in your browser at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. 
Now visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. The frontend `verylargejavaservice` is still running on the cluster, but its request to the `DataProcessingService` to retrieve the color to show is being proxied by Telepresence to your laptop.
-
-
-  We’ve just shown how we can edit code locally, and immediately see these changes in the cluster.
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n`. The default for the fourth value is correct so hit enter to accept it - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: n - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.0/quick-start/go.md b/docs/v2.0/quick-start/go.md deleted file mode 120000 index c884a46c..00000000 --- a/docs/v2.0/quick-start/go.md +++ /dev/null @@ -1 +0,0 @@ -qs-go.md \ No newline at end of file diff --git a/docs/v2.0/quick-start/index.md b/docs/v2.0/quick-start/index.md deleted file mode 100644 index efcb65b5..00000000 --- a/docs/v2.0/quick-start/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- - description: Telepresence Quick Start. 
---- - -import TelepresenceQuickStartLanding from './TelepresenceQuickStartLanding' - - diff --git a/docs/v2.0/quick-start/qs-cards.js b/docs/v2.0/quick-start/qs-cards.js deleted file mode 100644 index 80540b69..00000000 --- a/docs/v2.0/quick-start/qs-cards.js +++ /dev/null @@ -1,64 +0,0 @@ -import React from 'react'; -import { makeStyles } from '@material-ui/core/styles'; -import Paper from '@material-ui/core/Paper'; -import Grid from '@material-ui/core/Grid'; -import Typography from '@material-ui/core/Typography'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: "100%", - - - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - Collaborating - - - Use preview URLS to collaborate with your colleagues and others outside of your organization. - - - - - - - Outbound Sessions - - - While connected to the cluster, your laptop can interact with services as if it was another pod in the cluster. - - - - - - - FAQs - - - Learn more about uses cases and the technical implementation of Telepresence. - - - - -
- ); -} diff --git a/docs/v2.0/quick-start/qs-go.md b/docs/v2.0/quick-start/qs-go.md deleted file mode 100644 index 5be15170..00000000 --- a/docs/v2.0/quick-start/qs-go.md +++ /dev/null @@ -1,322 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters to use with this quick start, quickly set one up!. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - “blue” - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: all connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your GitHub account and choose your org. - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000 --mount=false` - You will be asked for your ingress; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`. - Finally, type `n` for “Use TLS”. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 --mount=false - - Confirm the ingress to use for preview URL access - Ingress service.namespace ? verylargejavaservice.default - Port ? 8080 - Use TLS y/n ? n - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.0/quick-start/qs-java.md b/docs/v2.0/quick-start/qs-java.md deleted file mode 100644 index b7dad804..00000000 --- a/docs/v2.0/quick-start/qs-java.md +++ /dev/null @@ -1,316 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters to use with this quick start, quickly set one up!. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - “blue” - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: all connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a deploy. -
- With Telepresence, these changes happen instantly. -
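Since the Java example does not hot-reload, it can help to see the restart loop spelled out. A rough sketch, run from `edgey-corp-java/DataProcessingService/` (startup log abridged):

```
# Stop the running server with Ctrl-C, then start it again:
$ mvn spring-boot:run
...
Started DataProcessingServiceJavaApplication ...

# In your other terminal, confirm the new color is served locally:
$ curl localhost:3000/color
"orange"
```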
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your GitHub account and choose your org. - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000 --mount=false` - You will be asked for your ingress; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`. - Finally, type `n` for “Use TLS”. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 --mount=false - - Confirm the ingress to use for preview URL access - Ingress service.namespace ? verylargejavaservice.default - Port ? 8080 - Use TLS y/n ? n - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.0/quick-start/qs-node.md b/docs/v2.0/quick-start/qs-node.md deleted file mode 100644 index bf5a3fd3..00000000 --- a/docs/v2.0/quick-start/qs-node.md +++ /dev/null @@ -1,330 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters to use with this quick start, quickly set one up!. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a deploy. -
- With Telepresence, these changes happen instantly. -
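Before moving on to preview URLs, a quick sanity check that the daemon and the intercept are still healthy can save some confusion. A sketch (the exact fields printed by `telepresence status` differ between releases):

```
# Check overall connectivity and any active intercepts:
$ telepresence status

# Re-check what the local Node server is returning:
$ curl localhost:3000/color
"orange"
```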
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your GitHub account and choose your org. - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.0/quick-start/qs-python-fastapi.md b/docs/v2.0/quick-start/qs-python-fastapi.md deleted file mode 100644 index 3358aa6b..00000000 --- a/docs/v2.0/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters to use with this quick start, quickly set one up!. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - “blue” - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: all connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your GitHub account and choose your org. - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000 --mount=false` - You will be asked for your ingress; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`. - Finally, type `n` for “Use TLS”. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 --mount=false - - Confirm the ingress to use for preview URL access - Ingress service.namespace ? verylargejavaservice.default - Port ? 8080 - Use TLS y/n ? n - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.0/quick-start/qs-python.md b/docs/v2.0/quick-start/qs-python.md deleted file mode 100644 index 952cd442..00000000 --- a/docs/v2.0/quick-start/qs-python.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters to use with this quick start, quickly set one up!. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server. -Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install flask requests && python app.py - - Collecting flask - ... - Welcome to the DataServiceProcessingPythonService! - ... - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - “blue” - ``` - - - Victory, your local Python server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: all connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your GitHub account and choose your org. - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000 --mount=false` - You will be asked for your ingress; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`. - Finally, type `n` for “Use TLS”. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 --mount=false - - Confirm the ingress to use for preview URL access - Ingress service.namespace ? verylargejavaservice.default - Port ? 8080 - Use TLS y/n ? n - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/v2.0/quick-start/telepresence-quickstart-landing.less b/docs/v2.0/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index eb9f3489..00000000 --- a/docs/v2.0/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,148 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.doc-body .telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1, - h2 { - color: @blue-dark; - font-style: normal; - font-weight: normal; - letter-spacing: 0.25px; - } - - h1 { - font-size: 33px; - line-height: 40px; - - svg { - vertical-align: text-bottom; - } - } - - h2 { - font-size: 23px; - line-height: 33px; - margin: 0 0 1rem; - - .highlight-mark { - background: transparent; - color: @blue-dark; - background: -moz-linear-gradient(top, transparent 0%, transparent 60%, fade(@blue-electric, 15%) 60%, fade(@blue-electric, 15%) 100%); - background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,transparent), color-stop(60%,transparent), color-stop(60%,fade(@blue-electric, 15%)), color-stop(100%,fade(@blue-electric, 15%))); - background: -webkit-linear-gradient(top, transparent 0%,transparent 60%,fade(@blue-electric, 15%) 60%,fade(@blue-electric, 15%) 100%); - background: -o-linear-gradient(top, transparent 0%,transparent 60%,fade(@blue-electric, 15%) 60%,fade(@blue-electric, 15%) 100%); - background: -ms-linear-gradient(top, transparent 0%,transparent 60%,fade(@blue-electric, 15%) 60%,fade(@blue-electric, 15%) 100%); - background: linear-gradient(to bottom, transparent 0%,transparent 60%,fade(@blue-electric, 15%) 60%,fade(@blue-electric, 15%) 100%); - filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='transparent', endColorstr='fade(@blue-electric, 15%)',GradientType=0 ); - padding: 0 3px; - margin: 0 .1em 0 0; - } - } - - .telepresence-choice { - background: @white; - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - - strong { - color: @blue; - } - } - - .telepresence-choice-wrapper { - border-bottom: solid 1px @grey-separator; - column-gap: 60px; - display: inline-grid; - grid-template-columns: repeat(2, 1fr); - margin: 20px 0 50px; - padding: 0 0 62px; - width: 100%; - - .telepresence-choice { - ol { - li { - font-size: 14px; - } - } - - .get-started-button { - background-color: @green; - border-radius: 5px; - color: @white; - display: inline-flex; - font-style: normal; - font-weight: 600; - font-size: 14px; - line-height: 24px; - margin: 0 0 15px 5px; - padding: 13px 20px; - align-items: center; - letter-spacing: 1.25px; - text-decoration: none; - text-transform: uppercase; - transition: background-color 200ms linear 0ms; - - svg { - fill: @white; - height: 20px; - width: 20px; - } - - &:hover { - background-color: @green-dark; - text-decoration: none; - } - } - - - p { - font-style: normal; - font-weight: normal; - font-size: 16px; - line-height: 26px; - letter-spacing: 0.5px; - } - } - } - - .video-wrapper { - display: flex; - flex-direction: row; - - ul { - li { - font-size: 14px; - margin: 0 10px 10px 0; - } - } - - div { - &.video-container { - flex: 1 1 70%; - position: relative; - width: 100%; - padding-bottom: 39.375%; - - .video { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; - } - } - - &.description { - flex: 0 1 30%; - } - } - } -} diff --git a/docs/v2.0/redirects.yml 
b/docs/v2.0/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/v2.0/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/v2.0/reference/architecture.md b/docs/v2.0/reference/architecture.md deleted file mode 100644 index 477399a5..00000000 --- a/docs/v2.0/reference/architecture.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: "How Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the deployment's pod(s). You can see the Traffic Agent's status by running `kubectl describe -pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. diff --git a/docs/v2.0/reference/client.md b/docs/v2.0/reference/client.md deleted file mode 100644 index 5ff8e389..00000000 --- a/docs/v2.0/reference/client.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." 
---- - -# Client Reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | -| `login` | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out out of Ambassador Cloud | -| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `preview` | Create or remove preview domains for existing intercepts | -| `status` | Shows the current connectivity status | -| `quit` | Quits the local daemon, stopping all intercepts and outbound traffic to the cluster| -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000` | -| `leave` | Stops an active intercept, for example: `telepresence leave hello` | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific deployment, the `--all-agents` flag to remove all Traffic Agents from all deployments, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. diff --git a/docs/v2.0/reference/dns.md b/docs/v2.0/reference/dns.md deleted file mode 100644 index 4f0482c1..00000000 --- a/docs/v2.0/reference/dns.md +++ /dev/null @@ -1,66 +0,0 @@ -# DNS Resolution - -The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces of currently active intercepts. Processes running locally on the desktop will have network access to all services in the such namespaces by service-name only. - -All intercepts contribute to the DNS resolver, even those that do not use the `--namespace=` option. This is because `--namespace default` is implied, and in this context, `default` is treated just like any other namespace. - -No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `` only. Without an active intercept, the namespace qualified DNS name must be used (in the form `.`). - -See this demonstrated below, using the [quick start's](../../quick-start/) sample app services. - -No intercepts are currently running, we'll connect to the cluster and list the services that can be intercepted. - -``` -$ telepresence connect - - Connecting to traffic manager... 
- Connected to context default (https://) - -$ telepresence list - - verylargejavaservice : ready to intercept (traffic-agent not yet installed) - dataprocessingservice: ready to intercept (traffic-agent not yet installed) - verylargedatastore : ready to intercept (traffic-agent not yet installed) - -$ curl verylargejavaservice:8080 - - curl: (6) Could not resolve host: verylargejavaservice - -``` - -This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace. - -``` -$ curl verylargejavaservice.default:8080 - - - - - Welcome to the EdgyCorp WebApp - ... -``` - -Using the namespaced qualified DNS name though does work. -Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified. - -``` -$ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: all connections - -$ curl verylargejavaservice:8080 - - - - - Welcome to the EdgyCorp WebApp - ... -``` - -Now curling that service by its short name works and will as long as the intercept is active. - -The DNS resolver will always be able to resolve services using `.` regardless of intercepts. diff --git a/docs/v2.0/reference/environment.md b/docs/v2.0/reference/environment.md deleted file mode 100644 index 08fa1886..00000000 --- a/docs/v2.0/reference/environment.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." ---- - -# Environment Variables - -Telepresence can import environment variables from the cluster pod when running an intercept. -You can then use these variables with the code running on your laptop of the service being intercepted. - -There are three options available to do this: - -1. `telepresence intercept --port --env-file=` - - This will write the environment variables to a Docker Compose `.env` file. This file can be used with `docker-compose` when starting containers locally. Please see the Docker documentation regarding the [file syntax](https://docs.docker.com/compose/env-file/) and [usage](https://docs.docker.com/compose/environment-variables/) for more information. - -2. `telepresence intercept --port --env-json=` - - This will write the environment variables to a JSON file. This file can be injected into other build processes. - -3. `telepresence intercept --port -- ` - - This will run a command locally with the Pod's environment variables set on your laptop. Once the command quits the intercept is stopped (as if `telepresence leave ` was run). This can be used in conjunction with a local server command, such as `python ` or `node ` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. - - Another use would be running a subshell, Bash for example: - - `telepresence intercept --port -- /bin/bash` - - This would start the intercept then launch the subshell on your laptop with all the same variables set as on the pod. diff --git a/docs/v2.0/reference/volume.md b/docs/v2.0/reference/volume.md deleted file mode 100644 index 4f22ca50..00000000 --- a/docs/v2.0/reference/volume.md +++ /dev/null @@ -1,33 +0,0 @@ -# Volume Mounts - -import Alert from '@material-ui/lab/Alert'; - -Telepresence supports locally mounting of volumes that are mounted to your Pods. 
You can specify a command to run when starting the intercept, this could be a subshell or local server such as Python or Node. - -``` -telepresence intercept --port --mount=/tmp/ -- /bin/bash -``` - -In this case, Telepresence creates the intercept, mounts the Pod's volumes to locally to `/tmp`, and starts a Bash subshell. - -Telepresence can set a random mount point for you by using `--mount=true` instead, you can then find the mount point using the `$TELEPRESENCE_ROOT` variable. - -``` -$ telepresence intercept --port --mount=true -- /bin/bash -Using deployment -intercepted - State : ACTIVE - Destination : 127.0.0.1: - Intercepting: all connections - -bash-3.2$ echo $TELEPRESENCE_ROOT -/var/folders/yh/42y5h_7s5992f80sjlv3wlgc0000gn/T/telfs-427288831 -``` - ---mount=true is the default if a mount option is not specified, use --mount=false to disable mounting volumes. - -With either method, the code you run locally either from the subshell or from the intercept command will need to be prepended with the `$TELEPRESENCE_ROOT` environment variable to utilitze the mounted volumes. - -For example, Kubernetes mounts secrets to `/var/run/secrets`. Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets`. - -If using --mount=true without a command, you can use either environment variable flag to retrieve the variable. diff --git a/docs/v2.0/troubleshooting/index.md b/docs/v2.0/troubleshooting/index.md deleted file mode 100644 index e1ec85d6..00000000 --- a/docs/v2.0/troubleshooting/index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an Intercept Did Not Generate a Preview URL - -Preview URLs are only generated when you are logged into Ambassador Cloud, so that you can use it to manage all your preview URLs. When not logged in, the intercept will not generate a preview URL and will proxy all traffic. Remove the intercept with `telepresence leave [deployment name]`, run `telepresence login` to login to Ambassador Cloud, then recreate the intercept. See the [intercepts how-to doc](../howtos/intercepts) for more details. - -## Error on Accessing Preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on Accessing Preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Your GitHub Organization Isn't Listed - -Ambassador Cloud needs access granted to your GitHub organization as a third-party OAuth app. If an org isn't listed during login then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → **Settings** → **Applications** → **Authorized OAuth Apps** → **Ambassador Labs**. 
An org owner will have a **Grant** button, anyone not an owner will have **Request** which sends an email to the owner. If an access request has been denied in the past the user will not see the **Request** button, they will have to reach out to the owner. - -Once access is granted, log out of Ambassador Cloud and log back in, you should see the GitHub org listed. - -The org owner can go to the **GitHub menu** → **Your organizations** → **[org name]** → **Settings** → **Third-party access** to see if Ambassador Labs has access already or authorize a request for access (only owners will see **Settings** on the org page). Clicking the pencil icon will show the permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or Requesting Access on Initial Login - -The first time you login to Ambassador Cloud, GitHub will ask to authorize Ambassador Labs to access your orgs and certain user data. - - - -Any listed org with a green check has already granted access to Ambassador Labs (you still need to authorize to allow Ambassador Labs to read your user data and org membership). - -Any org with a red X requires access to be granted to Ambassador Labs. Owners of the org will see a **Grant** button. Anyone who is not an owner will see a **Request** button. This will send an email to the org owner requesting approval to access the org. If an access request has been denied in the past the user will not see the **Request** button, they will have to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud then back in to select the org. - diff --git a/docs/v2.0/tutorial.md b/docs/v2.0/tutorial.md deleted file mode 100644 index 36135738..00000000 --- a/docs/v2.0/tutorial.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for your OS to install it and login to Ambassador Cloud in your browser. Follow the prompts to login with GitHub then select your organization. You will be redirected to the dashboard; later you will manage your preview URLs here. - -### macOS - -``` -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. 
Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - - -### Linux - -``` -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. - - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip>/. Be sure you use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
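If you prefer to verify this step from the terminal, here is a small sketch (it assumes the demo app was deployed into the `default` namespace and reuses the `ambassador` service queried above; adjust the names to match your cluster):

```
# Wait for the demo deployments to become available, then fetch the web app
# through the external load balancer over plain HTTP.
kubectl wait --for=condition=available deploy --all -n default --timeout=120s
LB_IP=$(kubectl get service -n ambassador ambassador \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl -i "http://${LB_IP}/"
```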
- -## Developing with Telepresence - -Now that your app is all wired up you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change on the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but more complex apps requiring many more dependencies is not feasible to manage running on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services on code running locally without needing to run dependent services or redeploy code updates to your cluster on every change. It also will generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and logging into Ambassador Cloud. The Cloud dashboard is used to manage your intercepts and share them with colleagues. You must be logged in to create selective intercepts as we are going to do here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`. 
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - Confirm the ingress to use for preview URL access - Ingress service.namespace [ambassador.ambassador] ? - Port [443] ? 80 - Use TLS y/n [y] ? n - Using deployment dataprocessingnodeservice - intercepted - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting: HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp ("76a1e848-1829-74x-1138-e3294c1e9119:dataprocessingnodeservice") - Preview URL : https://[random-subdomain].preview.edgestack.me - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom of the page informing you that you are viewing a preview URL with your name and org name.
- - - - - - -
  1. Switch back to the dashboard page in your browser and refresh it to see your preview URL listed. Click the box to expand the options, where you can disable authentication or remove the preview.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
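This check is not part of the original walkthrough, but if you want to confirm from the terminal that the intercept is still in place, the commands covered elsewhere in these docs can be used:

```
telepresence status   # shows the connection state and the number of active intercepts
telepresence list     # the intercepted service should no longer show "ready to intercept"
```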
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/v2.0/versions.yml b/docs/v2.0/versions.yml deleted file mode 100644 index 67a427dc..00000000 --- a/docs/v2.0/versions.yml +++ /dev/null @@ -1,4 +0,0 @@ -version: "2.0.3" -dlVersion: "2.0.3" -docsVersion: "2.0" -productName: "Telepresence" diff --git a/docs/v2.1/community.md b/docs/v2.1/community.md deleted file mode 100644 index aa0b6f0e..00000000 --- a/docs/v2.1/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's Guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/release/v2/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/v2.1/concepts/context-prop.md b/docs/v2.1/concepts/context-prop.md deleted file mode 100644 index 4ec09396..00000000 --- a/docs/v2.1/concepts/context-prop.md +++ /dev/null @@ -1,25 +0,0 @@ -# Context propagation - -**Context propagation** is the transfer of request metadata across the services and remote processes of a distributed system. Telepresence uses context propagation to intelligently route requests to the appropriate destination. - -This metadata is the context that is transferred across system services. It commonly takes the form of HTTP headers; context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -Metadata propagation refers to any service or other middleware not stripping away the headers. Propagation facilitates the movement of the injected contexts between other downstream services and processes. - - -## What is distributed tracing? 
- -Distributed tracing is a technique for troubleshooting and profiling distributed microservices applications and is a common application for context propagation. It is becoming a key component for debugging. - -In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) (or another header format, such as [B3 headers](https://github.com/openzipkin/b3-propagation)), which facilitates maintaining the headers through every service without being stripped (the propagation). - - -## What are intercepts and preview URLs? - -[Intercepts](../../reference/intercepts) and [preview URLs](../../howtos/preview-urls/) are functions of Telepresence that enable easy local development from a remote Kubernetes cluster and offer a preview environment for sharing and real-time collaboration. - -Telepresence also uses custom headers and header propagation for controllable intercepts and preview URLs instead of for tracing. The headers facilitate the smart routing of requests either to live services in the cluster or services running locally on a developer’s machine. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to [Ambassador Cloud](https://app.getambassador.io) with other information about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. diff --git a/docs/v2.1/concepts/devloop.md b/docs/v2.1/concepts/devloop.md deleted file mode 100644 index fd58950e..00000000 --- a/docs/v2.1/concepts/devloop.md +++ /dev/null @@ -1,50 +0,0 @@ -# The developer experience and the inner dev loop - -## How is the developer experience changing? - -The developer experience is the workflow a developer uses to develop, test, deploy, and release software. - -Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. - -The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, [controlled (canary) release](../../../../argo/latest/concepts/canary/), and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a [GitOps workflow](../../../../argo/latest/concepts/gitops/#what-is-gitops) and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. 
- -Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. - -Engineers now must design and build distributed service-based applications _and_ also assume responsibility for the full development life cycle. The new developer experience means that developers can no longer rely on monolithic application developer best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner development loop. Now developers have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this adds development time to the equation. - -## What is the inner dev loop? - -The inner dev loop is the single developer workflow. A single developer should be able to set up and use an inner dev loop to code and test changes quickly. - -Even within the Kubernetes space, developers will find much of the inner dev loop familiar. That is, code can still be written locally at a level that a developer controls and committed to version control. - -In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code -- they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. - -![traditional inner dev loop](../../images/trad-inner-dev-loop.png) - -## In search of lost time: How does containerization change the inner dev loop? - -The inner dev loop is where writing and testing code happens, and time is critical for maximum developer productivity and getting features in front of end users. The faster the feedback loop, the faster developers can refactor and test again. - -Changes to the inner dev loop process, i.e., containerization, threaten to slow this development workflow down. Coding stays the same in the new inner dev loop, but code has to be containerized. The _containerized_ inner dev loop requires a number of new steps: - -* packaging code in containers -* writing a manifest to specify how Kubernetes should run the application (e.g., YAML-based configuration information, such as how much memory should be given to a container) -* pushing the container to the registry -* deploying containers in Kubernetes - -Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. - - -![container inner dev loop](../../images/container-inner-dev-loop.png) - -## Tackling the slow inner dev loop - -A slow inner dev loop can negatively impact frontend and backend teams, delaying work on individual and team levels and slowing releases into production overall. 
- -For example: - -* Frontend developers have to wait for previews of backend changes on a shared dev/staging environment (for example, until CI/CD deploys a new version) and/or rely on mocks/stubs/virtual services when coding their application locally. These changes are only verifiable by going through the CI/CD process to build and deploy within a target environment. -* Backend developers have to wait for CI/CD to build and deploy their app to a target environment to verify that their code works correctly with cluster or cloud-based dependencies as well as to share their work to get feedback. - -New technologies and tools can facilitate cloud-native, containerized development. And in the case of a sluggish inner dev loop, developers can accelerate productivity with tools that help speed the loop up again. diff --git a/docs/v2.1/concepts/devworkflow.md b/docs/v2.1/concepts/devworkflow.md deleted file mode 100644 index b09f186d..00000000 --- a/docs/v2.1/concepts/devworkflow.md +++ /dev/null @@ -1,7 +0,0 @@ -# The changing development workflow - -A changing workflow is one of the main challenges for developers adopting Kubernetes. Software development itself isn’t the challenge. Developers can continue to [code using the languages and tools with which they are most productive and comfortable](/resources/kubernetes-local-dev-toolkit/). That’s the beauty of containerized development. - -However, the cloud-native, Kubernetes-based approach to development means adopting a new development workflow and development environment. Beyond the basics, such as figuring out how to containerize software, [how to run containers in Kubernetes](/docs/kubernetes/latest/concepts/appdev/), and how to deploy changes into containers, for example, Kubernetes adds complexity before it delivers efficiency. The promise of a “quicker way to develop software” applies at least within the traditional aspects of the inner dev loop, where the single developer codes, builds and tests their software. But both within the inner dev loop and once code is pushed into version control to trigger the outer dev loop, the developer experience changes considerably from what many developers are used to. - -In this new paradigm, new steps are added to the inner dev loop, and more broadly, the developer begins to share responsibility for the full life cycle of their software. Inevitably this means taking new workflows and tools on board to ensure that the full life cycle continues full speed ahead. diff --git a/docs/v2.1/doc-links.yml b/docs/v2.1/doc-links.yml deleted file mode 100644 index 95e6c4bd..00000000 --- a/docs/v2.1/doc-links.yml +++ /dev/null @@ -1,52 +0,0 @@ - - title: Quick Start - link: quick-start - - title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Core Concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: "Making the remote local: Faster feedback, collaboration and debugging" - link: concepts/faster - - title: Context Propagation - link: concepts/context-prop - - title: How Do I... 
- items: - - title: Intercept a Service - link: howtos/intercepts - - title: Share Dev Environments with Preview URLs - link: howtos/preview-urls - - title: Proxy Outbound Traffic to My Cluster - link: howtos/outbound - - title: Technical Reference - items: - - title: Architecture - link: reference/architecture - - title: Client Reference - link: reference/client - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Environment Variables - link: reference/environment - - title: Intercepts - link: reference/intercepts - - title: Volume Mounts - link: reference/volume - - title: DNS Resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: FAQs - link: faqs - - title: Troubleshooting - link: troubleshooting - - title: Community - link: community diff --git a/docs/v2.1/faqs.md b/docs/v2.1/faqs.md deleted file mode 100644 index 3517c7f7..00000000 --- a/docs/v2.1/faqs.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access with additional developers or stakeholders to the application via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What protocols can be intercepted by Telepresence?** - -All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](../../../../feedback) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. 
Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. - -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. - -Telepresence will discover/prompt during first use for this info and make its best guess at figuring this out and ask you to confirm or update this. - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the Traffic Manager and Traffic Agent containers that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptable mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single Traffic Manager service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted all pods associated with this workload will be restarted with the Traffic Agent automatically injected. - -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the Traffic Manager service installed in the cluster and Traffic Agent containers injected into each pod being intercepted. 
- -Running this command will also stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established via the standard `kubectl` mechanisms and SSH tunnelling. - - - -** What identity providers are supported for authenticating to view a preview URL?** - -* GitHub -* GitLab -* Google - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](../../../../feedback) which providers are the most important to you and your team in order for us to prioritize those. - -** Is Telepresence open source?** - -Telepresence will be open source soon, in the meantime it is free to download. We prioritized releasing the binary as soon as possible for community feedback, but are actively working on the open sourcing logistics. - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](../../../../feedback), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts. diff --git a/docs/v2.1/howtos/intercepts.md b/docs/v2.1/howtos/intercepts.md deleted file mode 100644 index 9be2ff2c..00000000 --- a/docs/v2.1/howtos/intercepts.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Intercept a Service in Your Own Environment - -
-
-Contents
-
-* [Prerequisites](#prerequisites)
-* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli)
-* [2. Test Telepresence](#2-test-telepresence)
-* [3. Intercept your service](#3-intercept-your-service)
-* [4. Create a Preview URL to only intercept certain requests to your service](#4-create-a-preview-url-to-only-intercept-certain-requests-to-your-service)
-* [What's next?](#img-classos-logo-srcimageslogopng-whats-next)
-
- -For a detailed walk-though on creating intercepts using our sample app, follow the quick start guide. - -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/) and [set up](https://kubernetes.io/docs/tasks/tools/install-kubectl/#verifying-kubectl-configuration) to use a Kubernetes cluster, preferably an empty test cluster. - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -This guide assumes you have a Kubernetes deployment and service accessible publicly by an ingress controller and that you can run a copy of that service on your laptop. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: - `telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: - `curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Intercept your service - -In this section, we will go through the steps required for you to intercept all traffic going to a service in your cluster and route it to your local environment instead. - -1. List the services that you can intercept with `telepresence list` and make sure the one you want to intercept is listed. - - For example, this would confirm that `example-service` can be intercepted by Telepresence: - ``` - $ telepresence list - - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -2. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example, this would show that the port `80` is named `http` in the `example-service`: - - ``` - $ kubectl get service example-service --output yaml - - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -3. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - - - For the `--port` argument, specify the port on which your local instance of your service will be running. - - If the service you are intercepting exposes more than one port, specify the one you want to intercept after a colon. - - For the `--env-file` argument, specify the path to a file on which Telepresence should write the environment variables that your service is currently running with. This is going to be useful as we start our service. - - For the example below, Telepresence will intercept traffic going to service `example-service` so that requests reaching it on port `http` in the cluster get routed to `8080` on the workstation and write the environment variables of the service to `~/example-service-intercept.env`. - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -4. Start your local environment using the environment variables retrieved in the previous step. - - Here are a few options to pass the environment variables to your local process: - - with `docker run`, provide the path to the file using the [`--env-file` argument](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) - - with JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.) use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile) - - with Visual Studio Code, specify the path to the environment variables file in the `envFile` field of your configuration - -5. 
Query the environment in which you intercepted a service the way you usually would and see your local instance being invoked. - - - Didn't work? Make sure the port you're listening on matches the one specified when creating your intercept. - - - - Congratulations! All the traffic usually going to your Kubernetes Service is now being routed to your local environment! - - -You can now: -- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - -## 4. Create a Preview URL to only intercept certain requests to your service - -When working on a development environment with multiple engineers, you don't want your intercepts to impact your -teammates. Ambassador Cloud automatically generates a Preview URL when creating an intercept if you are logged in. By -doing so, Telepresence can route only the requests coming from that Preview URL to your local environment; the rest will -be routed to your cluster as usual. - -1. Clean up your previous intercept by removing it: -`telepresence leave ` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept --port [:] --env-file ` - - You will be asked for the following information: - 1. **Ingress layer 3 address**: This would usually be the internal address of your ingress controller in the format `.namespace`. For example, if you have a service `ambassador-edge-stack` in the `ambassador` namespace, you would enter `ambassador-edge-stack.ambassador`. - 2. **Ingress port**: The port on which your ingress controller is listening (often 80 for non-TLS and 443 for TLS). - 3. **Ingress TLS encryption**: Whether the ingress controller is expecting TLS communication on the specified port. - 4. **Ingress layer 5 hostname**: If your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), this is the value you would need to enter here. - - - Telepresence supports any ingress controller, not just Ambassador Edge Stack. - - - For the example below, you will create a preview URL that will send traffic to the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and setting the `Host` HTTP header to `dev-environment.edgestack.me`: - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. 
- - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using Deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -4. Start your local service as in the previous step. - -5. Go to the preview URL printed after doing the intercept and see that your local service is processing the request. - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -6. Make a request on the URL you would usually query for that environment. The request should not be routed to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) will route to services in the cluster like normal. - - - Congratulations! You have now only intercepted traffic coming from your Preview URL, without impacting your teammates. - - -You can now: -- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - -...and all of this without impacting your teammates! -## What's Next? - - diff --git a/docs/v2.1/howtos/outbound.md b/docs/v2.1/howtos/outbound.md deleted file mode 100644 index 6405ff49..00000000 --- a/docs/v2.1/howtos/outbound.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Proxy Outbound Traffic to My Cluster - -While preview URLs are a powerful feature, there are other options to use Telepresence for proxying traffic between your laptop and the cluster. - - We'll assume below that you have the quick start sample web app running in your cluster so that we can test accessing the verylargejavaservice service. That service can be substituted however for any service you are running. - -## Proxying Outbound Traffic - -Connecting to the cluster instead of running an intercept will allow you to access cluster workloads as if your laptop was another pod in the cluster. You will be able to access other Kubernetes services using `.`, for example by curling a service from your terminal. A service running on your laptop will also be able to interact with other services on the cluster by name. - -Connecting to the cluster starts the background daemon on your machine and installs the [Traffic Manager pod](../../reference/architecture/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect`, you will be prompted for your password to run the daemon. - - ``` - $ telepresence connect - Launching Telepresence Daemon v2.1.4 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground /home//.cache/telepresence/logs '' ''" - [sudo] password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -1. 
Run `telepresence status` to confirm that you are connected to your cluster and are proxying traffic to it. - - ``` - $ telepresence status - Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" - User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 0 total - ``` - -1. Now try to access your service by name with `curl verylargejavaservice.default:8080`. Telepresence will route the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl verylargejavaservice.default:8080 - - - - Welcome to the EdgyCorp WebApp - ... - ``` - -3. Terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer being proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -When using Telepresence in this way, services must be accessed with the namespace qualified DNS name (<service name>.<namespace>) before starting an intercept. After starting an intercept, only <service name> is required. Read more about these differences in DNS resolution here. - -## Controlling Outbound Connectivity - -By default, Telepresence will provide access to all Services found in all namespaces in the connected cluster. This might lead to problems if the user does not have access permissions to all namespaces via RBAC. The `--mapped-namespaces ` flag was added to give the user control over exactly which namespaces will be accessible. - -When using this option, it is important to include all namespaces containing services to be accessed and also all namespaces that contain services that those intercepted services might use. - -### Using local-only intercepts - -An intercept with the flag`--local-only` can be used to control outbound connectivity to specific namespaces. - -When developing services that have not yet been deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service is intended to be deployed so that it can access other services in that namespace without using qualified names. - - ``` - $ telepresence intercept --namespace --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. The intercept is deactivated just like any other intercept. - - ``` - $ telepresence leave - ``` -The unqualified name access is now removed provided that no other intercept is active and using the same namespace. - -### External dependencies (formerly --also-proxy) -If you have a resource outside of the cluster that you need access to, you can leverage Headless Services to provide access. This will give you a kubernetes service formatted like all other services (`my-service.prod.svc.cluster.local`), that resolves to your resource. - -If the outside service has a DNS name, you can use the [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) service type, which will create a service that can be used from within your cluster and from your local machine when connected with telepresence. - -If the outside service is an ip, create a [service without selectors](https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors) and then create an endpoint of the same name. 
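As an illustration of the second case, a minimal sketch of a selector-less Service plus a matching Endpoints object (the name `external-db`, port `5432`, and IP `10.0.0.42` are placeholders for this example, not values from the docs):

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: external-db
spec:
  ports:
    - port: 5432
---
apiVersion: v1
kind: Endpoints
metadata:
  name: external-db
subsets:
  - addresses:
      - ip: 10.0.0.42
    ports:
      - port: 5432
EOF
```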
- -In both scenarios, Kubernetes will create a service that can be used from within your cluster and from your local machine when connected with telepresence. diff --git a/docs/v2.1/howtos/preview-urls.md b/docs/v2.1/howtos/preview-urls.md deleted file mode 100644 index b88fb20a..00000000 --- a/docs/v2.1/howtos/preview-urls.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Share Dev Environments with Preview URLs - -Telepresence can generate sharable preview URLs, allowing you to work on a copy of your service locally and share that environment directly with a teammate for pair programming. While using preview URLs Telepresence will route only the requests coming from that preview URL to your local environment; requests to the ingress will be routed to your cluster as usual. - -Preview URLs are protected behind authentication via Ambassador Cloud, ensuring that only users in your organization can view them. A preview URL can also be set to allow public access for sharing with outside collaborators. - -## Prerequisites - -* You should have the Telepresence CLI [installed](../../install/) on your laptop. - -* If you have Telepresence already installed and have used it previously, please first reset it with `telepresence uninstall --everything`. - -* You will need a service running in your cluster that you would like to intercept. - - -Need a sample app to try with preview URLs? Check out the quick start. It has a multi-service app to install in your cluster with instructions to create a preview URL for that app. - - -## Creating a Preview URL - -1. List the services that you can intercept with `telepresence list` and make sure the one you want is listed. - - If it isn't: - - * Only Deployments, ReplicaSets, or StatefulSets are supported, and each of those requires a label matching a Service - - * If the service is in a different namespace, specify it with the `--namespace` flag - -2. Login to Ambassador Cloud where you can manage and share preview URLs: -`telepresence login` - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept: -`telepresence intercept --port --env-file ` - - For `--port`, specify the port on which your local instance of your service will be running. If the service you are intercepting exposes more than one port, specify the one you want to intercept after a colon. - - For `--env-file`, specify a file path where Telepresence will write the environment variables that are set in the Pod. This is going to be useful as we start our service locally. - - You will be asked for the following information: - 1. **Ingress layer 3 address**: This would usually be the internal address of your ingress controller in the format `.namespace `. For example, if you have a service `ambassador-edge-stack` in the `ambassador` namespace, you would enter `ambassador-edge-stack.ambassador`. - 2. **Ingress port**: The port on which your ingress controller is listening (often 80 for non-TLS and 443 for TLS). - 3. **Ingress TLS encryption**: Whether the ingress controller is expecting TLS communication on the specified port. - 4. **Ingress layer 5 hostname**: If your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), enter that value here. 
- - For the example below, you will create a preview URL for `example-service` which listens on port 8080. The preview URL for ingress will use the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and the hostname `dev-environment.edgestack.me`: - - ``` - $ telepresence intercept example-service --port 8080 --env-file ~/ex-svc.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -4. Start your local environment using the environment variables retrieved in the previous step. - - Here are a few options to pass the environment variables to your local process: - - with `docker run`, provide the path to the file using the [`--env-file` argument](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) - - with JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.) use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile) - - with Visual Studio Code, specify the path to the environment variables file in the `envFile` field of your configuration - -5. Go to the preview URL that was provided after starting the intercept (the next to last line in the terminal output above). Your local service will be processing the request. - - - Success! You have intercepted traffic coming from your preview URL without impacting other traffic from your Ingress. - - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -6. Make a request on the URL you would usually query for that environment. The request should **not** be routed to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) will route to services in the cluster like normal. - -7. Share with a teammate. - - You can collaborate with teammates by sending your preview URL to them. They will be asked to log in to Ambassador Cloud if they are not already. Upon log in they must select the same identity provider and org as you are using; that is how they are authorized to access the preview URL (see the [list of supported identity providers](../../faqs/#idps)). When they visit the preview URL, they will see the intercepted service running on your laptop. - - - Congratulations! You have now created a dev environment and shared it with a teammate! 
While you and your partner work together to debug your service, the production version remains unchanged to the rest of your team until you commit your changes. - - -## Sharing a Preview URL with People Outside Your Team - -To collaborate with someone outside of your identity provider's organization, you must go to [Ambassador Cloud](https://app.getambassador.io/cloud/), select the preview URL, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the intercept either from the dashboard or by running `telepresence leave ` also removes all access to the preview URL. diff --git a/docs/v2.1/images/container-inner-dev-loop.png b/docs/v2.1/images/container-inner-dev-loop.png deleted file mode 100644 index 06586cd6..00000000 Binary files a/docs/v2.1/images/container-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.1/images/github-login.png b/docs/v2.1/images/github-login.png deleted file mode 100644 index cfd4d4bf..00000000 Binary files a/docs/v2.1/images/github-login.png and /dev/null differ diff --git a/docs/v2.1/images/logo.png b/docs/v2.1/images/logo.png deleted file mode 100644 index 701f63ba..00000000 Binary files a/docs/v2.1/images/logo.png and /dev/null differ diff --git a/docs/v2.1/images/trad-inner-dev-loop.png b/docs/v2.1/images/trad-inner-dev-loop.png deleted file mode 100644 index 618b674f..00000000 Binary files a/docs/v2.1/images/trad-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.1/install/index.md b/docs/v2.1/install/index.md deleted file mode 100644 index 2afa65c4..00000000 --- a/docs/v2.1/install/index.md +++ /dev/null @@ -1,34 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install Telepresence - -Install Telepresence by running the commands below for your OS. - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## What's Next? - -Follow one of our [quick start guides](../quick-start/) to start using Telepresence, either with our sample app or in your own environment. diff --git a/docs/v2.1/install/upgrade.md b/docs/v2.1/install/upgrade.md deleted file mode 100644 index 7fef9ca3..00000000 --- a/docs/v2.1/install/upgrade.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: "How to upgrade your installation of Telepresence and install previous versions." ---- - -import Platform from '@src/components/Platform'; - -# Upgrade Telepresence - -
-

Contents

- -* [Upgrade Process](#upgrade-process) -* [Installing Older Versions of Telepresence](#installing-older-versions-of-telepresence) -* [Migrating from Telepresence 1 to Telepresence 2](#migrating-from-telepresence-1-to-telepresence-2) - -
- -## Upgrade Process -The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -After upgrading your CLI, the Traffic Manager **must be uninstalled** from your cluster. This can be done using `telepresence uninstall --everything` or by `kubectl delete svc,deploy traffic-manager`. The next time you run a `telepresence` command it will deploy an upgraded Traffic Manager. - -## Installing Older Versions of Telepresence - -Use these URLs to download an older version for your OS, replacing `x.x.x` with the version you want. - - - - -``` -https://app.getambassador.io/download/tel2/darwin/amd64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/x.y.z/telepresence -``` - - - - -## Migrating from Telepresence 1 to Telepresence 2 - -Telepresence 2 (the current major version) has different mechanics and requires a different mental model from [Telepresence 1](https://www.telepresence.io/docs/v1/) when working with local instances of your services. - -In Telepresence 1, a pod running a service is swapped with a pod running the Telepresence proxy. This proxy receives traffic intended for the service, and sends the traffic onward to the target workstation or laptop. We called this mechanism "swap-deployment". - -In practice, this mechanism, while simple in concept, had some challenges. Losing the connection to the cluster would leave the deployment in an inconsistent state. Swapping the pods would take time. - -Telepresence 2 introduces a [new architecture](../../reference/architecture/) built around "intercepts" that addresses this problem. With Telepresence 2, a sidecar proxy is injected onto the pod. The proxy then intercepts traffic intended for the pod and routes it to the workstation/laptop. The advantage of this approach is that the service is running at all times, and no swapping is used. By using the proxy approach, we can also do selective intercepts, where certain types of traffic get routed to the service while other traffic gets routed to your laptop/workstation. - -Please see [the Telepresence quick start](../../quick-start/) for an introduction to running intercepts and [the intercept reference doc](../../reference/intercepts/) for a deep dive into intercepts. 
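As a recap of the upgrade steps described earlier on this page, here is a minimal sketch of the full sequence for Linux/amd64 (the macOS commands differ only in the download URL); it assumes `/usr/local/bin` is writable with `sudo` and that `$dlVersion$` resolves to the version you want:

```shell
# 1. Replace the CLI binary (same commands as the install step above):
sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence
sudo chmod a+x /usr/local/bin/telepresence

# 2. Remove the old Traffic Manager from the cluster:
telepresence uninstall --everything

# 3. Any subsequent telepresence command redeploys an upgraded Traffic Manager:
telepresence connect
```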
diff --git a/docs/v2.1/quick-start/TelepresenceQuickStartLanding.js b/docs/v2.1/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 537a6325..00000000 --- a/docs/v2.1/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,126 +0,0 @@ -import React from 'react'; - -import Icon from '../../../src/components/Icon'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */ -const Box = ({ children, color = 'blue', withConnector = false }) => ( - <> - {withConnector && ( -
- -
- )} -
{children}
- -); - -const TelepresenceQuickStartLanding = () => ( -
-

- Telepresence -

-

- Explore the use cases of Telepresence with a free remote Kubernetes - cluster, or dive right in using your own. -

- -
-
-
-

- Use Our Free Demo Cluster -

-

- See how Telepresence works without having to mess with your - production environments. -

-
- -

6 minutes

-

Integration Testing

-

- See how changes to a single service impact your entire application - without having to run your entire app locally. -

- - GET STARTED{' '} - - -
- -

5 minutes

-

Fast code changes

-

- Make changes to your service locally and see the results instantly, - without waiting for containers to build. -

- - GET STARTED{' '} - - -
-
-
-
-

- Use Your Cluster -

-

- Understand how Telepresence fits in to your Kubernetes development - workflow. -

-
- -

10 minutes

-

Intercept your service in your cluster

-

- Query services only exposed in your cluster's network. Make changes - and see them instantly in your K8s environment. -

- - GET STARTED{' '} - - -
-
-
- -
-

Watch the Demo

-
-
-

- See Telepresence in action in our 3-minute demo - video that you can share with your teammates. -

-
    -
  • Instant feedback loops
  • -
  • Infinite-scale development environments
  • -
  • Access to your favorite local tools
  • -
  • Easy collaborative development with teammates
  • -
-
-
- -
-
-
-
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/v2.1/quick-start/demo-node.md b/docs/v2.1/quick-start/demo-node.md deleted file mode 100644 index 8c936cc7..00000000 --- a/docs/v2.1/quick-start/demo-node.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import QSCards from './qs-cards' - -# Telepresence Quick Start - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Download the demo cluster archive](#1-download-the-demo-cluster-archive) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Check out the sample application](#3-check-out-the-sample-application) -* [4. Run a service on your laptop](#4-run-a-service-on-your-laptop) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -In this guide we'll give you **everything you need in a preconfigured demo cluster:** the Telepresence CLI, a config file for connecting to your demo cluster, and code to run a cluster service locally. - - - Already have a cluster? Switch over to a version of this guide that takes you though the same steps using your own cluster. - - -## 1. Download the demo cluster archive - -1. {window.open('https://app.getambassador.io/cloud/demo-cluster-download-popup', 'ambassador-cloud-demo-cluster', 'menubar=no,location=no,resizable=yes,scrollbars=yes,status=no,width=550,height=750'); e.preventDefault(); }} target="_blank">Sign in to Ambassador Cloud to download your demo cluster archive. The archive contains all the tools and configurations you need to complete this guide. - -2. Extract the archive file, open the `ambassador-demo-cluster` folder, and run the installer script (the commands below might vary based on where your browser saves downloaded files). - - - This step will also install some dependency packages onto your laptop using npm, you can see those packages at ambassador-demo-cluster/edgey-corp-nodejs/DataProcessingService/package.json. - - - ``` - cd ~/Downloads - unzip ambassador-demo-cluster.zip -d ambassador-demo-cluster - cd ambassador-demo-cluster - ./install.sh - ``` - -3. The demo cluster we provided already has a demo app running. List the app's services: - `kubectl get services` - - ``` - $ kubectl get services - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.43.0.1 443/TCP 14h - dataprocessingservice ClusterIP 10.43.159.239 3000/TCP 14h - verylargejavaservice ClusterIP 10.43.223.61 8080/TCP 14h - verylargedatastore ClusterIP 10.43.203.19 8080/TCP 14h - ``` - -4. Confirm that the Telepresence CLI is now installed, we expect to see that the daemons are not yet running: -`telepresence status` - - ``` - $ telepresence status - - Root Daemon: Not running - User Daemon: Not running - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence status command. - - - - You now have Telepresence installed on your workstation and a Kubernetes cluster configured in your terminal. - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster (this requires root privileges and will ask for your password): -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. 
Check out the sample application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - -We'll use a sample app that is already installed in your demo cluster. Let's take a quick look at it's architecture before continuing. - -1. Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -2. Since you’ve already connected Telepresence to your cluster, you can access the frontend service in your browser at http://verylargejavaservice.default:8080. - -3. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Run a service on your laptop - -Now start up the DataProcessingService service on your laptop. This version of the code has the UI color set to blue instead of green. - -1. **In a new terminal window**, go the demo application directory in the extracted archive folder: - `cd edgey-corp-nodejs/DataProcessingService` - -2. Start the application: - `npm start` - - ``` - $ npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - -4. **Back in your previous terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - - Didn't work? Make sure you are working in the terminal window where you ran the script because it sets environment variables to access the demo cluster. Those variables will only will apply to that terminal session. - - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - ... - ``` - -2. Go to the frontend service again in your browser at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. 
Now visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. The frontend `verylargejavaservice` is still running on the cluster, but its request to the `DataProcessingService` to retrieve the color to show is being proxied by Telepresence to your laptop. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
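As an optional sanity check (not part of the steps above), you can confirm the change from the command line; this sketch assumes the intercept from the previous section is still active and the Node server has auto-reloaded:

```shell
# The local server should now serve the edited color:
curl localhost:3000/color
# Expected (approximate) output after the change: "orange"
```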
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n`. The default for the fourth value is correct so hit enter to accept it - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: n - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.1/quick-start/go.md b/docs/v2.1/quick-start/go.md deleted file mode 120000 index c884a46c..00000000 --- a/docs/v2.1/quick-start/go.md +++ /dev/null @@ -1 +0,0 @@ -qs-go.md \ No newline at end of file diff --git a/docs/v2.1/quick-start/index.md b/docs/v2.1/quick-start/index.md deleted file mode 100644 index efcb65b5..00000000 --- a/docs/v2.1/quick-start/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- - description: Telepresence Quick Start. 
---- - -import TelepresenceQuickStartLanding from './TelepresenceQuickStartLanding' - - diff --git a/docs/v2.1/quick-start/qs-cards.js b/docs/v2.1/quick-start/qs-cards.js deleted file mode 100644 index 31582355..00000000 --- a/docs/v2.1/quick-start/qs-cards.js +++ /dev/null @@ -1,70 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - - Collaborating - - - - Use preview URLS to collaborate with your colleagues and others - outside of your organization. - - - - - - - - Outbound Sessions - - - - While connected to the cluster, your laptop can interact with - services as if it was another pod in the cluster. - - - - - - - - FAQs - - - - Learn more about uses cases and the technical implementation of - Telepresence. - - - - -
- ); -} diff --git a/docs/v2.1/quick-start/qs-go.md b/docs/v2.1/quick-start/qs-go.md deleted file mode 100644 index 87b5d600..00000000 --- a/docs/v2.1/quick-start/qs-go.md +++ /dev/null @@ -1,343 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
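For reference, here is a condensed sketch of the local Go dev loop from sections 4–6, using only commands shown above; it assumes Go is installed and `$GOPATH/bin` exists:

```shell
# Fetch the sample service and run it with hot reload:
git clone https://github.com/datawire/edgey-corp-go.git
cd edgey-corp-go/DataProcessingService/
go get github.com/pilu/fresh     # install the auto-reloading runner
$GOPATH/bin/fresh                # serve on port 3000, reloading on save

# In a second terminal: check the color, edit main.go, then check again.
curl localhost:3000/color
```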
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.1/quick-start/qs-java.md b/docs/v2.1/quick-start/qs-java.md deleted file mode 100644 index 0b039096..00000000 --- a/docs/v2.1/quick-start/qs-java.md +++ /dev/null @@ -1,337 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
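Because the Java sample does not auto-reload, the edit-and-verify loop looks slightly different from the Node and Go guides; here is a minimal sketch using the commands from this page (it assumes Maven is installed and you are in `edgey-corp-java/DataProcessingService/`):

```shell
# After editing application.properties, stop the running server (Ctrl-C) and restart it:
mvn spring-boot:run

# In another terminal, confirm the new color is served locally:
curl localhost:3000/color
```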
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.1/quick-start/qs-node.md b/docs/v2.1/quick-start/qs-node.md deleted file mode 100644 index 806d9d47..00000000 --- a/docs/v2.1/quick-start/qs-node.md +++ /dev/null @@ -1,331 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
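Before moving on to preview URLs, it can help to see what Telepresence is currently doing and how to undo it; a small sketch using commands that appear elsewhere in this guide:

```shell
# List the workloads that Telepresence can intercept:
telepresence list

# Check that the daemons are running and connected:
telepresence status

# Remove the intercept when you are done (the next section begins with this):
telepresence leave dataprocessingservice
```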
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.1/quick-start/qs-python-fastapi.md b/docs/v2.1/quick-start/qs-python-fastapi.md deleted file mode 100644 index 24f86037..00000000 --- a/docs/v2.1/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,328 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
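For reference, here is a minimal sketch of the kind of FastAPI endpoint being edited in step 1 above; the actual `app.py` in the `edgey-corp-python-fastapi` repo is more elaborate, so treat the structure and names here as illustrative only:

```python
# Illustrative sketch only -- the real DataProcessingService app.py differs.
from fastapi import FastAPI
import uvicorn

DEFAULT_COLOR = "blue"  # step 1 above changes this value to "orange"

app = FastAPI()

@app.get("/color")
def get_color():
    # The returned string is serialized as JSON, matching `curl localhost:3000/color` -> "blue"
    return DEFAULT_COLOR

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=3000)
```

Because the local server listens on port 3000, the same port passed to `telepresence intercept`, the cluster traffic rerouted by the intercept lands directly on this process.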
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.1/quick-start/qs-python.md b/docs/v2.1/quick-start/qs-python.md deleted file mode 100644 index 4d79336e..00000000 --- a/docs/v2.1/quick-start/qs-python.md +++ /dev/null @@ -1,339 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server. -Python 2.x: `pip install flask requests && python app.py` -Python 3.x: `pip3 install flask requests && python3 app.py` - - ``` - $ pip install flask requests && python app.py - - Collecting flask - ... - Welcome to the DataServiceProcessingPythonService! - ... - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Python server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
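For reference, a minimal sketch of the kind of Flask route being edited in step 1 above; the actual `app.py` in the `edgey-corp-python` repo is more elaborate, so the structure and names here are illustrative only:

```python
# Illustrative sketch only -- the real DataProcessingService app.py differs.
from flask import Flask, jsonify

DEFAULT_COLOR = "blue"  # step 1 above changes this value to "orange"

app = Flask(__name__)

@app.route("/color")
def get_color():
    # Returns the JSON string "blue", matching `curl localhost:3000/color`
    return jsonify(DEFAULT_COLOR)

if __name__ == "__main__":
    # debug=True enables Flask's auto-reloader, so saving the file restarts the server
    app.run(host="127.0.0.1", port=3000, debug=True)
```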
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/v2.1/quick-start/telepresence-quickstart-landing.less b/docs/v2.1/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index 1a8c3ddc..00000000 --- a/docs/v2.1/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,185 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.doc-body .telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1, - h2 { - color: @blue-dark; - font-style: normal; - font-weight: normal; - letter-spacing: 0.25px; - } - - h1 { - font-size: 33px; - line-height: 40px; - - svg { - vertical-align: text-bottom; - } - } - - h2 { - font-size: 23px; - line-height: 33px; - margin: 0 0 1rem; - - .highlight-mark { - background: transparent; - color: @blue-dark; - background: -moz-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -webkit-gradient( - linear, - left top, - left bottom, - color-stop(0%, transparent), - color-stop(60%, transparent), - color-stop(60%, fade(@blue-electric, 15%)), - color-stop(100%, fade(@blue-electric, 15%)) - ); - background: -webkit-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -o-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -ms-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: linear-gradient( - to bottom, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='transparent', endColorstr='fade(@blue-electric, 15%)',GradientType=0 ); - padding: 0 3px; - margin: 0 0.1em 0 0; - } - } - - .telepresence-choice { - background: @white; - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - - strong { - color: @blue; - } - } - - .telepresence-choice-wrapper { - border-bottom: solid 1px @grey-separator; - column-gap: 60px; - display: inline-grid; - grid-template-columns: repeat(2, 1fr); - margin: 20px 0 50px; - padding: 0 0 62px; - width: 100%; - - .telepresence-choice { - ol { - li { - font-size: 14px; - } - } - - .get-started-button { - background-color: @green; - border-radius: 5px; - color: @white; - display: inline-flex; - font-style: normal; - font-weight: 600; - font-size: 14px; - line-height: 24px; - margin: 0 0 15px 5px; - padding: 13px 20px; - align-items: center; - letter-spacing: 1.25px; - text-decoration: none; - text-transform: uppercase; - transition: background-color 200ms linear 0ms; - - svg { - fill: @white; - height: 20px; - width: 20px; - } - - &:hover { - background-color: @green-dark; - text-decoration: none; - } - } - - p { - font-style: normal; - font-weight: normal; - font-size: 16px; - line-height: 26px; - letter-spacing: 0.5px; - } - } - } - - .video-wrapper { - display: flex; - flex-direction: row; - - ul { - li { - font-size: 14px; - margin: 0 10px 10px 0; - } - } - - div { - &.video-container { - flex: 1 1 70%; - position: relative; - width: 100%; - padding-bottom: 39.375%; - - .video { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; - } - } - - 
&.description { - flex: 0 1 30%; - } - } - } -} diff --git a/docs/v2.1/redirects.yml b/docs/v2.1/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/v2.1/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/v2.1/reference/architecture.md b/docs/v2.1/reference/architecture.md deleted file mode 100644 index 47facb0b..00000000 --- a/docs/v2.1/reference/architecture.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: "How Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `kubectl describe -pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. diff --git a/docs/v2.1/reference/client.md b/docs/v2.1/reference/client.md deleted file mode 100644 index db59e26a..00000000 --- a/docs/v2.1/reference/client.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." 
---- - -# Client Reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | -| `login` | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out out of Ambassador Cloud | -| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `preview` | Create or remove [preview URLs](../../howtos/preview-urls) for existing intercepts: `telepresence preview create ` | -| `status` | Shows the current connectivity status | -| `quit` | Quits the local daemon, stopping all intercepts and outbound traffic to the cluster| -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000` | -| `leave` | Stops an active intercept: `telepresence leave hello` | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. diff --git a/docs/v2.1/reference/cluster-config.md b/docs/v2.1/reference/cluster-config.md deleted file mode 100644 index a00b1067..00000000 --- a/docs/v2.1/reference/cluster-config.md +++ /dev/null @@ -1,68 +0,0 @@ -# Cluster-side configuration - -For the most part, Telepresence doesn't require any special -configuration in the cluster, and can be used right away in any -cluster, as long as the user has adequate [permission](../rbac). - -However, some advanced features do require some configuration in the -cluster. - -# TLS - -If other applications in the cluster expect to speak TLS to your -intercepted application (perhaps you're using a service-mesh that does -mTLS), in order to use `--mechanism=http` (or any features that imply -`--mechanism=http`) you need to tell Telepresence about the TLS -certificates in use. - -Tell Telepresence about the certificates in use by adjusting your -workload's (eg. 
Deployment's) Pod template to set a couple of -annotations on the intercepted Pods: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ "getambassador.io/inject-terminating-tls-secret": "your-terminating-secret" # optional -+ "getambassador.io/inject-originating-tls-secret": "your-originating-secret" # optional - spec: -+ serviceAccountName: "your-account-that-has-rbac-to-read-those-secrets" - containers: -``` - -- The `getambassador.io/inject-terminating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS server - certificate to use for decrypting and responding to incoming - requests. - - When Telepresence modifies the Service's and workload's port - definitions to point at the Telepresence Agent sidecar's port - instead of your application's actual port, the sidecar will use this - certificate to terminate TLS. - -- The `getambassador.io/inject-originating-tls-secret` annotation - (optional) and names the Kubernetes Secret that contains the TLS - client certificate to use for communicating with your application. - - If your application expects incoming requests to speak TLS (eg. your - code expects to handle mTLS itself instead of letting a service-mesh - sidecar handle mTLS for it; or the port definition that Telepresence - modified pointed at the service-mesh sidecar instead of at your - application), then you will need to set this. - - If you do set this, it is usually the correct thing to set it to the - same client certificate Secret that you configure the Ambassador - Edge Stack to use for mTLS. - -It is only possible to refer to a Secret that is in the same Namespace -as the Pod. - -The Pod will need to have permission to `get` and `watch` each of -those Secrets. - -Telepresence understands `type: kubernetes.io/tls` Secrets and -`type: istio.io/key-and-cert` Secrets; as well as `type: Opaque` -Secrets that it detects to be formatted as one of those types. diff --git a/docs/v2.1/reference/config.md b/docs/v2.1/reference/config.md deleted file mode 100644 index ac81202a..00000000 --- a/docs/v2.1/reference/config.md +++ /dev/null @@ -1,32 +0,0 @@ -# Laptop-side configuration - -Telepresence uses a `config.yml` file to store and change certain values. The location of this file varies based on your OS: - -* macOS: `$HOME/Library/Application Support/telepresence/config.yml` -* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` - -For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence. - -## Values - -The config file currently only supports values for the `timeouts` key, here is an example file: - -```yaml -timeouts: - agentInstall: 1m - intercept: 10s -``` - -Values are all durations either as a number respresenting seconds or a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings can be fractional (`1.5h`) or combined (`2h45m`). 
- -These are the valid fields for the `timeouts` key: - -|Field|Description|Default| -|---|---|---| -|`agentInstall`|Waiting for Traffic Agent to be installed|2 minutes| -|`apply`|Waiting for a Kubernetes manifest to be applied|1 minute| -|`clusterConnect`|Waiting for cluster to be connected|20 seconds| -|`intercept`|Waiting for an intercept to become active|5 seconds| -|`proxyDial`|Waiting for an outbound connection to be established|5 seconds| -|`trafficManagerConnect`|Waiting for the Traffic Manager API to connect for port fowards|20 seconds| -|`trafficManagerAPI`|Waiting for connection to the gPRC API after `trafficManagerConnect` is successful|5 seconds| diff --git a/docs/v2.1/reference/dns.md b/docs/v2.1/reference/dns.md deleted file mode 100644 index 01a5ebb3..00000000 --- a/docs/v2.1/reference/dns.md +++ /dev/null @@ -1,68 +0,0 @@ -# DNS Resolution - -The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces of currently active intercepts. Processes running locally on the desktop will have network access to all services in the such namespaces by service-name only. - -All intercepts contribute to the DNS resolver, even those that do not use the `--namespace=` option. This is because `--namespace default` is implied, and in this context, `default` is treated just like any other namespace. - -No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `` only. Without an active intercept, the namespace qualified DNS name must be used (in the form `.`). - -See this demonstrated below, using the [quick start's](../../quick-start/) sample app services. - -No intercepts are currently running, we'll connect to the cluster and list the services that can be intercepted. - -``` -$ telepresence connect - - Connecting to traffic manager... - Connected to context default (https://) - -$ telepresence list - - verylargejavaservice : ready to intercept (traffic-agent not yet installed) - dataprocessingservice: ready to intercept (traffic-agent not yet installed) - verylargedatastore : ready to intercept (traffic-agent not yet installed) - -$ curl verylargejavaservice:8080 - - curl: (6) Could not resolve host: verylargejavaservice - -``` - -This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace. - -``` -$ curl verylargejavaservice.default:8080 - - - - - Welcome to the EdgyCorp WebApp - ... -``` - -Using the namespaced qualified DNS name though does work. -Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified. - -``` -$ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - -$ curl verylargejavaservice:8080 - - - - - Welcome to the EdgyCorp WebApp - ... -``` - -Now curling that service by its short name works and will as long as the intercept is active. - -The DNS resolver will always be able to resolve services using `.` regardless of intercepts. 
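The same resolution rules apply to any code running locally, not just `curl`. A short, hypothetical Python check (assuming `telepresence connect` has been run and the quick start's sample app is installed in the `default` namespace):

```python
import socket

def resolvable(name: str) -> bool:
    """Return True if the name resolves through the Telepresence DNS resolver."""
    try:
        socket.gethostbyname(name)
        return True
    except socket.gaierror:
        return False

# With no active intercept, only the namespace-qualified name resolves:
print(resolvable("verylargejavaservice.default"))  # True
print(resolvable("verylargejavaservice"))          # False

# After `telepresence intercept dataprocessingservice --port 3000`, the short
# name resolves too, because the implied "default" namespace is now used by
# the DNS resolver:
#   print(resolvable("verylargejavaservice"))       # True
```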
diff --git a/docs/v2.1/reference/environment.md b/docs/v2.1/reference/environment.md deleted file mode 100644 index a94783d2..00000000 --- a/docs/v2.1/reference/environment.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." ---- - -# Environment Variables - -Telepresence can import environment variables from the cluster pod when running an intercept. -You can then use these variables with the code running on your laptop of the service being intercepted. - -There are three options available to do this: - -1. `telepresence intercept [service] --port [port] --env-file=FILENAME` - - This will write the environment variables to a Docker Compose `.env` file. This file can be used with `docker-compose` when starting containers locally. Please see the Docker documentation regarding the [file syntax](https://docs.docker.com/compose/env-file/) and [usage](https://docs.docker.com/compose/environment-variables/) for more information. - -2. `telepresence intercept [service] --port [port] --env-json=FILENAME` - - This will write the environment variables to a JSON file. This file can be injected into other build processes. - -3. `telepresence intercept [service] --port [port] -- [COMMAND]` - - This will run a command locally with the pod's environment variables set on your laptop. Once the command quits the intercept is stopped (as if `telepresence leave [service]` was run). This can be used in conjunction with a local server command, such as `python [FILENAME]` or `node [FILENAME]` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. - - Another use would be running a subshell, Bash for example: - - `telepresence intercept [service] --port [port] -- /bin/bash` - - This would start the intercept then launch the subshell on your laptop with all the same variables set as on the pod. diff --git a/docs/v2.1/reference/intercepts.md b/docs/v2.1/reference/intercepts.md deleted file mode 100644 index 15bad0a6..00000000 --- a/docs/v2.1/reference/intercepts.md +++ /dev/null @@ -1,126 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Intercepts - -## Intercept Behavior When Logged into Ambassador Cloud - -After logging into Ambassador Cloud (with `telepresence login`), Telepresence will default to `--preview-url=true`, which will use Ambassador Cloud to create a sharable preview URL for this intercept. (Creating an intercept without logging in will default to `--preview-url=false`). - -In order to do this, it will prompt you for four options. For the first, `Ingress`, Telepresence tries to intelligently determine the ingress controller deployment and namespace for you. If they are correct, you can hit `enter` to accept the defaults. Set the next two options, `TLS` and `Port`, appropriately based on your ingress service. The fourth is a hostname for the service, if required by your ingress. - -Also because you're logged in, Telepresence will default to `--mechanism=http --http-match=auto` (or just `--http-match=auto`; `--http-match` implies `--mechanism=http`). If you hadn't been logged in it would have defaulted to `--mechanism=tcp`. This tells it to do smart intercepts and only intercept a subset of HTTP requests, rather than just intercepting the entirety of all TCP connections. This is important for working in a shared cluster with teammates, and is important for the preview URL functionality. 
See `telepresence intercept --help` for information on using `--http-match` to customize which requests it intercepts. - -## Supported Workloads -Kubernetes has various [workloads](https://kubernetes.io/docs/concepts/workloads/). Currently, telepresence supports intercepting Deployments, ReplicaSets, and StatefulSets. - While many of our examples may use Deployments, they would also work on ReplicaSets and StatefulSets - -## Specifying a namespace for an intercept - -The namespace of the intercepted workload is specified using the `--namespace` option. When this option is used, and `--workload` is not used, then the given name is interpreted as the name of the workload and the name of the intercept will be constructed from that name and the namespace. - -``` -telepresence intercept hello --namespace myns --port 9000 -``` - -This will intercept a workload named "hello" and name the intercept -"hello-myns". In order to remove the intercept, you will need to run -`telepresence leave hello-mydns` instead of just `telepresence leave -hello`. - -The name of the intercept will be left unchanged if the workload is specified. - -``` -telepresence intercept myhello --namespace myns --workload hello --port 9000 -``` - -This will intercept a workload named "hello" and name the intercept "myhello". - -## Importing Environment Variables - -Telepresence can import the environment variables from the pod that is being intercepted, see [this doc](../environment/) for more details. - -## Creating an Intercept Without a Preview URL - -If you *are not* logged into Ambassador Cloud, the following command will intercept all traffic bound to the service and proxy it to your laptop. This includes traffic coming through your ingress controller, so use this option carefully as to not disrupt production environments. - -``` -telepresence intercept --port= -``` - -If you *are* logged into Ambassador Cloud, setting the `preview-url` flag to `false` is necessary. - -``` -telepresence intercept --port= --preview-url=false -``` - -This will output a header that you can set on your request for that traffic to be intercepted: - -``` -$ telepresence intercept --port= --preview-url=false -Using Deployment -intercepted - Intercept name: - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -``` -$ telepresence status -Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" -User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 1 total - dataprocessingnodeservice: @ -``` - -Finally, run `telepresence leave ` to stop the intercept. - -## Creating an Intercept When a Service has Multiple Ports - -If you are trying to intercept a service that has multiple ports, you need to tell telepresence which service port you are trying to intercept. To specify, you can either use the name of the service port or the port number itself. To see which options might be available to you and your service, use kubectl to describe your service or look in the object's yaml. For more information on multiple ports, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services). 
- -``` -$ telepresence intercept --port=: -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -When intercepting a service that has multiple ports, the name of the service port that has been intercepted is also listed. - -If you want to change which port has been intercepted, you can create a new intercept the same way you did above and it will change which service port is being intercepted. - -## Creating an Intercept When Multiple Services Match your Workload - -Oftentimes, there's a 1-to-1 relationship between a service and a workload, so telepresence is able to auto-detect which service it should intercept based on the workload you are trying to intercept. But if you use something like [Argo](../../../../argo/latest/), it uses two services (that use the same labels) to manage traffic between a canary and a stable service. - -Fortunately, if you know which service you want to use when intercepting a workload, you can use the --service flag. So in the aforementioned demo, if you wanted to use the `echo-stable` service when intercepting your workload, your command would look like this: -``` -$ telepresence intercept echo-rollout- --port --service echo-stable -Using ReplicaSet echo-rollout- -intercepted - Intercept name : echo-rollout- - State : ACTIVE - Workload kind : ReplicaSet - Destination : 127.0.0.1:3000 - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 - Intercepting : all TCP connections -``` diff --git a/docs/v2.1/reference/rbac.md b/docs/v2.1/reference/rbac.md deleted file mode 100644 index 76103d3c..00000000 --- a/docs/v2.1/reference/rbac.md +++ /dev/null @@ -1,35 +0,0 @@ -# RBAC - -## Necessary RBAC for Users - -To use telepresence, users will need to have at least the following permissions: -``` -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -``` diff --git a/docs/v2.1/reference/volume.md b/docs/v2.1/reference/volume.md deleted file mode 100644 index 828ac058..00000000 --- a/docs/v2.1/reference/volume.md +++ /dev/null @@ -1,36 +0,0 @@ -# Volume Mounts - -import Alert from '@material-ui/lab/Alert'; - -Telepresence supports locally mounting of volumes that are mounted to your Pods. You can specify a command to run when starting the intercept, this could be a subshell or local server such as Python or Node. - -``` -telepresence intercept --port --mount=/tmp/ -- /bin/bash -``` - -In this case, Telepresence creates the intercept, mounts the Pod's volumes to locally to `/tmp`, and starts a Bash subshell. - -Telepresence can set a random mount point for you by using `--mount=true` instead, you can then find the mount point in the output of `telepresence list` or using the `$TELEPRESENCE_ROOT` variable. 
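For example, once an intercept with volume mounts is active (as in the session shown below), code running locally can read the Pod's mounted files through that mount point. A hypothetical Python sketch; which paths exist depends on what the intercepted Pod actually mounts:

```python
import os

# TELEPRESENCE_ROOT is set in the subshell or command started by `telepresence intercept`.
root = os.environ["TELEPRESENCE_ROOT"]

# Kubernetes mounts the service-account Secret under /var/run/secrets/kubernetes.io;
# locally that path is reachable underneath the Telepresence mount point.
token_path = os.path.join(root, "var/run/secrets/kubernetes.io/serviceaccount/token")

with open(token_path) as f:
    print(f"service-account token is {len(f.read())} bytes")
```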
- -``` -$ telepresence intercept --port --mount=true -- /bin/bash -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 - Intercepting : all TCP connections - -bash-3.2$ echo $TELEPRESENCE_ROOT -/var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 -``` - ---mount=true is the default if a mount option is not specified, use --mount=false to disable mounting volumes. - -With either method, the code you run locally either from the subshell or from the intercept command will need to be prepended with the `$TELEPRESENCE_ROOT` environment variable to utilitze the mounted volumes. - -For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`. - -If using --mount=true without a command, you can use either environment variable flag to retrieve the variable. diff --git a/docs/v2.1/troubleshooting/index.md b/docs/v2.1/troubleshooting/index.md deleted file mode 100644 index bdfdb8c9..00000000 --- a/docs/v2.1/troubleshooting/index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an Intercept Did Not Generate a Preview URL - -Preview URLs are only generated when you are logged into Ambassador Cloud, so that you can use it to manage all your preview URLs. When not logged in, the intercept will not generate a preview URL and will proxy all traffic. Remove the intercept with `telepresence leave [deployment name]`, run `telepresence login` to login to Ambassador Cloud, then recreate the intercept. See the [intercepts how-to doc](../howtos/intercepts) for more details. - -## Error on Accessing Preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on Accessing Preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Your GitHub Organization Isn't Listed - -Ambassador Cloud needs access granted to your GitHub organization as a third-party OAuth app. If an org isn't listed during login then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → **Settings** → **Applications** → **Authorized OAuth Apps** → **Ambassador Labs**. An org owner will have a **Grant** button, anyone not an owner will have **Request** which sends an email to the owner. If an access request has been denied in the past the user will not see the **Request** button, they will have to reach out to the owner. 
- -Once access is granted, log out of Ambassador Cloud and log back in, you should see the GitHub org listed. - -The org owner can go to the **GitHub menu** → **Your organizations** → **[org name]** → **Settings** → **Third-party access** to see if Ambassador Labs has access already or authorize a request for access (only owners will see **Settings** on the org page). Clicking the pencil icon will show the permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or Requesting Access on Initial Login - -When using GitHub as your identity provider, the first time you login to Ambassador Cloud GitHub will ask to authorize Ambassador Labs to access your orgs and certain user data. - - - -Any listed org with a green check has already granted access to Ambassador Labs (you still need to authorize to allow Ambassador Labs to read your user data and org membership). - -Any org with a red X requires access to be granted to Ambassador Labs. Owners of the org will see a **Grant** button. Anyone who is not an owner will see a **Request** button. This will send an email to the org owner requesting approval to access the org. If an access request has been denied in the past the user will not see the **Request** button, they will have to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud then back in to select the org. - diff --git a/docs/v2.1/tutorial.md b/docs/v2.1/tutorial.md deleted file mode 100644 index c4296ae5..00000000 --- a/docs/v2.1/tutorial.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for your OS to install it and login to Ambassador Cloud in your browser. Follow the prompts to login with GitHub then select your organization. You will be redirected to the dashboard; later you will manage your preview URLs here. - -### macOS - -``` -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. 
Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - - -### Linux - -``` -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. - - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip>/. Be sure you use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
- -## Developing with Telepresence - -Now that your app is all wired up you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change on the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but more complex apps requiring many more dependencies is not feasible to manage running on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services on code running locally without needing to run dependent services or redeploy code updates to your cluster on every change. It also will generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and logging into Ambassador Cloud. The Cloud dashboard is used to manage your intercepts and share them with colleagues. You must be logged in to create selective intercepts as we are going to do here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`. 
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom on the page informing that you are viewing a preview URL with your name and org name.
- - - - - - -
  1. Switch back in your browser to the dashboard page and refresh it to see your preview URL listed. Click the box to expand out options where you can disable authentication or remove the preview.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/v2.1/versions.yml b/docs/v2.1/versions.yml deleted file mode 100644 index e9bc7faa..00000000 --- a/docs/v2.1/versions.yml +++ /dev/null @@ -1,4 +0,0 @@ -version: "2.1.5" -dlVersion: "2.1.5" -docsVersion: "2.1" -productName: "Telepresence" diff --git a/docs/v2.19/README.md b/docs/v2.19/README.md new file mode 100644 index 00000000..fdbcee10 --- /dev/null +++ b/docs/v2.19/README.md @@ -0,0 +1,23 @@ +# Telepresence Documentation + +This folder contains the Telepresence documentation in a format suitable for a versioned folder in the +telepresenceio/telepresence.io repository. The folder will show up in that repository when a new minor revision +tag is created here. + +Assuming that a 2.20.0 release is pending, and that a release/v2.20.0 branch has been created, then: +```console +$ export TELEPRESENCE_VERSION=v2.20.0 +$ make prepare-release +$ git push origin {,rpc/}v2.20.0 release/v2.20.0 +``` + +will result in a `docs/v2.20` folder with this folder's contents in the telepresenceio/telepresence.io repository. + +Subsequent bugfix tags for the same minor tag, i.e.: +```console +$ export TELEPRESENCE_VERSION=v2.20.1 +$ make prepare-release +$ git push origin {,rpc/}v2.20.1 release/v2.20.1 +``` +will not result in a new folder when it is pushed, but it will update the content of the `docs/v2.20` folder to +reflect this folder's content for that tag. diff --git a/docs/2.14/community.md b/docs/v2.19/community.md similarity index 60% rename from docs/2.14/community.md rename to docs/v2.19/community.md index 922457c9..771129c9 100644 --- a/docs/2.14/community.md +++ b/docs/v2.19/community.md @@ -1,12 +1,8 @@ # Community ## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) +Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/CONTRIBUTING.md) on GitHub to learn how you can help make Telepresence better. 
-## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - ## Meetings Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/v2.2/concepts/devloop.md b/docs/v2.19/concepts/devloop.md similarity index 89% rename from docs/v2.2/concepts/devloop.md rename to docs/v2.19/concepts/devloop.md index 886338f3..5f627922 100644 --- a/docs/v2.2/concepts/devloop.md +++ b/docs/v2.19/concepts/devloop.md @@ -1,3 +1,7 @@ +--- +title: "The developer and the inner dev loop | Ambassador " +--- + # The developer experience and the inner dev loop ## How is the developer experience changing? @@ -6,7 +10,7 @@ The developer experience is the workflow a developer uses to develop, test, depl Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. -The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, [controlled (canary) release](../../../../argo/latest/concepts/canary/), and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a [GitOps workflow](../../../../argo/latest/concepts/gitops/#what-is-gitops) and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. +The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, controlled (canary) release, and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a GitOps workflow and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. @@ -20,7 +24,7 @@ Even within the Kubernetes space, developers will find much of the inner dev loo In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code — they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. -![traditional inner dev loop](../../images/trad-inner-dev-loop.png) +![traditional inner dev loop](../images/trad-inner-dev-loop.png) ## In search of lost time: How does containerization change the inner dev loop? 
@@ -36,7 +40,7 @@ Changes to the inner dev loop process, i.e., containerization, threaten to slow Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. -![container inner dev loop](../../images/container-inner-dev-loop.png) +![container inner dev loop](../images/container-inner-dev-loop.png) ## Tackling the slow inner dev loop diff --git a/docs/v2.19/concepts/docker.md b/docs/v2.19/concepts/docker.md new file mode 100644 index 00000000..3e5fe879 --- /dev/null +++ b/docs/v2.19/concepts/docker.md @@ -0,0 +1,104 @@ +# Telepresence with Docker Golden Path + +## Why? + +It can be tedious to adopt Telepresence across your organization, since in its handiest form, it requires admin access, and needs to get along with any exotic +networking setup that your company may have. + +If Docker is already approved in your organization, this Golden path should be considered. + +## How? + +When using Telepresence in Docker mode, users can eliminate the need for admin access on their machines, address several networking challenges, and forego the need for third-party applications to enable volume mounts. + +You can simply add the docker flag to any Telepresence command, and it will start your daemon in a container. +Thus removing the need for root access, making it easier to adopt as an organization + +Let's illustrate with a quick demo, assuming a default Kubernetes context named default, and a simple HTTP service: + +```cli +$ telepresence connect --docker +Connected to context default (https://default.cluster.bakerstreet.io) + +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +7a0e01cab325 datawire/telepresence:2.12.1 "telepresence connec…" 18 seconds ago Up 16 seconds 127.0.0.1:58802->58802/tcp tp-default +``` + +This method limits the scope of the potential networking issues since everything stays inside Docker. The Telepresence daemon can be found under the name `tp-` when listing your containers. + +Start an intercept and a corresponding intercept-handler: + +```cli +$ telepresence intercept echo-easy --port 8080:80 --docker-run -- jmalloc/echo-server +Using Deployment echo-easy + Intercept name : echo-easy + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1:8080 + Service Port Identifier: proxied + Intercepting : all TCP requests +Echo server listening on port 8080. +``` + +Using `--docker-run` starts the local container that acts as the intercept handler so that it uses the same network as the container that runs the telepresence daemon. It will also +have the remote volumes mounted in the same way as the remote container that it intercepts. + +If you want to curl your remote service, you'll need to do that from a container that shares the daemon container's network. 
You can find the network using `telepresence status`: +```cli +$ telepresence status | grep 'Container network' + Container network : container:tp-default-default-cn +``` + +Now curl with a `docker run` that uses that network: +```cli +$ docker run --network container:tp-default-default-cn --rm curlimages/curl echo-easy + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 99 100 99 0 0 21104 0 --:--:-- --:--:-- -Request served by 4b225bc8d6f1 + +GET / HTTP/1.1 + +Host: echo-easy +Accept: */* +User-Agent: curl/8.6.0 +-:--:-- 24750 +``` + +Similarly, if you want to start your intercept handler manually using `docker run`, you must ensure that it shares the daemon container's network: + +```cli +$ docker run \ + --network=container:tp-default \ + -e PORT=8080 jmalloc/echo-server +Echo server listening on port 8080. +``` + +### Tip. Use named connections +You can use the `--name` flag to name the connection and get a shorter network name: + +``` +$ telepresence quit +$ telepresence connect --docker --name a +``` +Now, the network name will be `tp-a` instead of `tp-default-default-cn`. + +Naming is also very useful when you want to connect to several namespaces simultaneously, e.g. + +``` +$ telepresence connect --docker --name alpha --namespace alpha +$ telepresence connect --docker --name beta --namespace beta +``` + +Now, with two connections active, you must pass the flag `--use ` to other commands, e.g. +``` +$ telepresence intercept echo-easy --use alpha --port 8080:80 --docker-run -- jmalloc/echo-server +``` + +## Key learnings + +* Using the Docker mode of telepresence **does not require root access**, and makes it **easier** to adopt it across your organization. +* It **limits the potential networking issues** you can encounter. +* It **limits the potential mount issues** you can encounter. +* It **enables simultaneous intercepts in multiple namespaces**. +* It leverages **Docker** for your interceptor. diff --git a/docs/v2.1/concepts/faster.md b/docs/v2.19/concepts/faster.md similarity index 64% rename from docs/v2.1/concepts/faster.md rename to docs/v2.19/concepts/faster.md index 7aa74ad1..d748de40 100644 --- a/docs/v2.1/concepts/faster.md +++ b/docs/v2.19/concepts/faster.md @@ -1,25 +1,28 @@ +--- +title: Install the Telepresence Docker extension | Ambassador +--- # Making the remote local: Faster feedback, collaboration and debugging -With the goal of achieving [fast, efficient development](/use-case/local-kubernetes-development/), developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. +With the goal of achieving fast, efficient development, developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. ## How should I set up a Kubernetes development environment? -[Setting up a development environment](/resources/development-environments-microservices/) for Kubernetes can be much more complex than the set up for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. +Setting up a development environment for Kubernetes can be much more complex than the setup for traditional web applications. 
Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. -A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../../quick-start/) to facilitate fast feedback, debugging and collaboration. +A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../quick-start.md) to facilitate fast feedback, debugging and collaboration. ## What is Telepresence? -Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../../quick-start/). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. +Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../quick-start.md). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. ## How can I get fast, efficient local development? The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. -A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code, local debugging in production, and [preview URL](../../howtos/preview-urls/) functionality to share dev environments with others for multi-user collaboration. +A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. 
Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code and local debugging in production. -Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. +Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP/UDP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. -The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts and preview URLs. +The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts. diff --git a/docs/v2.19/concepts/intercepts.md b/docs/v2.19/concepts/intercepts.md new file mode 100644 index 00000000..cb12d079 --- /dev/null +++ b/docs/v2.19/concepts/intercepts.md @@ -0,0 +1,94 @@ +--- +title: "Types of intercepts" +description: "Short demonstration of personal vs global intercepts" +--- + +import React from 'react'; + +import Alert from '@material-ui/lab/Alert'; +import AppBar from '@material-ui/core/AppBar'; +import Paper from '@material-ui/core/Paper'; +import Tab from '@material-ui/core/Tab'; +import TabContext from '@material-ui/lab/TabContext'; +import TabList from '@material-ui/lab/TabList'; +import TabPanel from '@material-ui/lab/TabPanel'; +import Animation from '@src/components/InterceptAnimation'; + +export function TabsContainer({ children, ...props }) { + const [state, setState] = React.useState({curTab: "personal"}); + React.useEffect(() => { + const query = new URLSearchParams(window.location.search); + var interceptType = query.get('intercept') || "regular"; + if (state.curTab != interceptType) { + setState({curTab: interceptType}); + } + }, [state, setState]) + var setURL = function(newTab) { + history.replaceState(null,null, + `?intercept=${newTab}${window.location.hash}`, + ); + }; + return ( +
+ + + {setState({curTab: newTab}); setURL(newTab)}} aria-label="intercept types"> + + + + + {children} + +
+ ); +}; + + + + +# No intercept + + + + +This is the normal operation of your cluster without Telepresence. + + + + + +# Intercept + + + + +**Intercepts** replace the Kubernetes "Orders" service with the +Orders service running on your laptop. The users see no change, but +with all the traffic coming to your laptop, you can observe and debug +with all your dev tools. + + + +### Creating and using intercepts + + 1. Creating the intercept: Intercept your service from your CLI: + + ```shell + telepresence intercept SERVICENAME + ``` + + + + Make sure your current kubectl context points to the target + cluster. If your service is running in a different namespace than + your current active context, use or change the `--namespace` flag. + + + + 2. Using the intercept: Send requests to your service: + + All requests will be sent to the version of your service that is + running in the local development environment. + + + diff --git a/docs/latest/doc-links.yml b/docs/v2.19/doc-links.yml similarity index 50% rename from docs/latest/doc-links.yml rename to docs/v2.19/doc-links.yml index ecc9da4f..f879b5f2 100644 --- a/docs/latest/doc-links.yml +++ b/docs/v2.19/doc-links.yml @@ -2,82 +2,72 @@ link: quick-start - title: Install Telepresence items: - - title: Install - link: install/ - - title: Upgrade + - title: Install Client + link: install/client/ + - title: Upgrade Client link: install/upgrade/ - title: Install Traffic Manager link: install/manager/ - - title: Install Traffic Manager with Helm - link: install/helm/ - title: Cloud Provider Prerequisites link: install/cloud/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ - title: Core concepts items: - - title: The changing development workflow - link: concepts/devworkflow - title: The developer experience and the inner dev loop - link: concepts/devloop + link: concepts/devloop/ - title: "Making the remote local: Faster feedback, collaboration and debugging" - link: concepts/faster - - title: Types of intercepts - link: concepts/intercepts + link: concepts/faster/ + - title: Intercepts + link: concepts/intercepts/ - title: How do I... 
items: - title: Intercept a service in your own environment - link: howtos/intercepts + link: howtos/intercepts/ - title: Proxy outbound traffic to my cluster - link: howtos/outbound + link: howtos/outbound/ + - title: Work with large clusters + link: howtos/large-clusters/ - title: Host a cluster in a local VM - link: howtos/cluster-in-vm + link: howtos/cluster-in-vm/ - title: Technical reference items: - title: Architecture - link: reference/architecture + link: reference/architecture/ - title: Client reference - link: reference/client + link: reference/client/ - title: Laptop-side configuration - link: reference/config + link: reference/config/ - title: Cluster-side configuration - link: reference/cluster-config + link: reference/cluster-config/ - title: Using Docker for intercepts - link: reference/docker-run + link: reference/docker-run/ - title: Running Telepresence in a Docker container - link: reference/inside-container + link: reference/inside-container/ - title: Environment variables - link: reference/environment + link: reference/environment/ - title: Intercepts link: reference/intercepts/ items: - title: Configure intercept using CLI - link: reference/intercepts/cli - - title: Manually injecting the Traffic Agent - link: reference/intercepts/manual-agent + link: reference/intercepts/cli/ - title: Volume mounts - link: reference/volume - - title: RESTful API service - link: reference/restapi + link: reference/volume/ - title: DNS resolution - link: reference/dns + link: reference/dns/ - title: RBAC - link: reference/rbac + link: reference/rbac/ - title: Telepresence and VPNs - link: reference/vpn + link: reference/vpn/ - title: Networking through Virtual Network Interface - link: reference/tun-device + link: reference/tun-device/ - title: Connection Routing - link: reference/routing - - title: Using Telepresence with Linkerd - link: reference/linkerd + link: reference/routing/ - title: FAQs - link: faqs + link: faqs/ - title: Troubleshooting - link: troubleshooting + link: troubleshooting/ - title: Community - link: community + link: community/ - title: Release Notes - link: release-notes + link: release-notes/ - title: Licenses - link: licenses \ No newline at end of file + link: licenses/ \ No newline at end of file diff --git a/docs/2.14/faqs.md b/docs/v2.19/faqs.md similarity index 82% rename from docs/2.14/faqs.md rename to docs/v2.19/faqs.md index c6c80a9b..50405ed3 100644 --- a/docs/2.14/faqs.md +++ b/docs/v2.19/faqs.md @@ -6,7 +6,7 @@ description: "Learn how Telepresence helps with fast development and debugging i ** Why Telepresence?** -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. +Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](concepts/devloop.md) from previous software projects is often a distant memory for cloud developers. 
Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. @@ -29,25 +29,23 @@ Personal intercepts require HTTP. All HTTP/1.1 and HTTP/2 protocols can be inter - gRPC - GraphQL -If you need another protocol supported, please [drop us a line](https://www.getambassador.io/feedback/) to request it. +If you need another protocol supported, please [drop us a line](https://github.com/telepresenceio/telepresence/issues/new?assignees=&labels=&projects=&template=Feature_request.md) to request it. ** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. +Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](reference/environment.md) for more information. ** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. +Yes, please see [the volume mounts reference doc](reference/volume.md) for more information. ** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. +Yes. After you have successfully connected to your cluster via `telepresence connect -n ` you will be able to access any service in the connected namespace in your cluster via their DNS name. -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. +This means you can curl endpoints directly e.g. `curl :8080/mypath`. -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. +You can also access services in other namespaces using their namespaced qualified name, e.g.`curl .:8080/mypath`. You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. @@ -78,7 +76,7 @@ A Traffic Agent container is injected per pod that is being intercepted. The fir You can run the command `telepresence helm uninstall` to remove everything from the cluster, including the `traffic-manager`, and all the `traffic-agent` containers injected into each pod being intercepted. -Also run `telepresence quit -s` to stop the local daemon running. +Also run `telepresence quit -s` to stop all local daemons running. 
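For reference, a minimal cleanup sequence might look like the following sketch (output is omitted because it varies by version and cluster):

```console
# Remove the Traffic Manager and all injected Traffic Agent containers from the cluster
$ telepresence helm uninstall

# Stop all local Telepresence daemons on this workstation
$ telepresence quit -s
```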
** What language is Telepresence written in?** @@ -89,7 +87,7 @@ All components of the Telepresence application and cluster components are writte The connection between your laptop and cluster is established by using the `kubectl port-forward` machinery (though without actually spawning a separate program) to establish a TLS encrypted connection to Telepresence -Traffic Manager in the cluster, and running Telepresence's custom VPN +Traffic Manager and Traffic Agents in the cluster, and running Telepresence's custom VPN protocol over that connection. @@ -100,4 +98,4 @@ Yes it is! You can find its source code on [GitHub](https://github.com/teleprese ** How do I share my feedback on Telepresence?** -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](https://www.getambassador.io/feedback/), or you can [join our Slack channel](http://a8r.io/slack) to share your thoughts. +Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our #telepresence-oss channel at the [CNCF Slack](https://slack.cncf.io). diff --git a/docs/v2.19/howtos/cluster-in-vm.md b/docs/v2.19/howtos/cluster-in-vm.md new file mode 100644 index 00000000..154b7503 --- /dev/null +++ b/docs/v2.19/howtos/cluster-in-vm.md @@ -0,0 +1,192 @@ +--- +title: "Considerations for locally hosted clusters | Ambassador" +description: "Use Telepresence to intercept services in a cluster running in a hosted virtual machine." +--- + +# Network considerations for locally hosted clusters + +## The problem +Telepresence creates a Virtual Network Interface ([VIF](../reference/tun-device.md)) that maps the clusters subnets to the host machine when it connects. If you're running Kubernetes locally (e.g., k3s, Minikube, Docker for Desktop), you may encounter network problems because the devices in the host are also accessible from the cluster's nodes. + +### Example: +A k3s cluster runs in a headless VirtualBox machine that uses a "host-only" network. This network will allow both host-to-guest and guest-to-host connections. In other words, the cluster will have access to the host's network and, while Telepresence is connected, also to its VIF. This means that from the cluster's perspective, there will now be more than one interface that maps the cluster's subnets; the ones already present in the cluster's nodes, and then the Telepresence VIF, mapping them again. + +Now, if a request arrives to Telepresence that is covered by a subnet mapped by the VIF, the request is routed to the cluster. If the cluster for some reason doesn't find a corresponding listener that can handle the request, it will eventually try the host network, and find the VIF. The VIF routes the request to the cluster and now the recursion is in motion. The final outcome of the request will likely be a timeout but since the recursion is very resource intensive (a large amount of very rapid connection requests), this will likely also affect other connections in a bad way. + +## Solution + +### Create a bridge network +A bridge network is a Link Layer (L2) device that forwards traffic between network segments. By creating a bridge network, you can bypass the host's network stack which enable the Kubernetes cluster to connect directly to the same router as your host. 
+ +To create a bridge network, you need to change the network settings of the guest running a cluster's node so that it connects directly to a physical network device on your host. The details on how to configure the bridge depends on what type of virtualization solution you're using. + +### Vagrant + Virtualbox + k3s example +Here's a sample `Vagrantfile` that will spin up a server node and two agent nodes in three headless instances using a bridged network. It also adds the configuration needed for the cluster to host a docker repository (very handy in case you want to save bandwidth). The Kubernetes registry manifest must be applied using `kubectl -f registry.yaml` once the cluster is up and running. + +#### Vagrantfile +```ruby +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# bridge is the name of the host's default network device +$bridge = 'wlp5s0' + +# default_route should be the IP of the host's default route. +$default_route = '192.168.1.1' + +# nameserver must be the IP of an external DNS, such as 8.8.8.8 +$nameserver = '8.8.8.8' + +# server_name should also be added to the host's /etc/hosts file and point to the server_ip +# for easy access when pushing docker images +server_name = 'multi' + +# static IPs for the server and agents. Those IPs must be on the default router's subnet +server_ip = '192.168.1.110' +agents = { + 'agent1' => '192.168.1.111', + 'agent2' => '192.168.1.112', +} + +# Extra parameters in INSTALL_K3S_EXEC variable because of +# K3s picking up the wrong interface when starting server and agent +# https://github.com/alexellis/k3sup/issues/306 +server_script = <<-SHELL + sudo -i + apk add curl + export INSTALL_K3S_EXEC="--bind-address=#{server_ip} --node-external-ip=#{server_ip} --flannel-iface=eth1" + mkdir -p /etc/rancher/k3s + cat <<-'EOF' > /etc/rancher/k3s/registries.yaml +mirrors: + "multi:5000": + endpoint: + - "http://#{server_ip}:5000" +EOF + curl -sfL https://get.k3s.io | sh - + echo "Sleeping for 5 seconds to wait for k3s to start" + sleep 5 + cp /var/lib/rancher/k3s/server/token /vagrant_shared + cp /etc/rancher/k3s/k3s.yaml /vagrant_shared + cp /etc/rancher/k3s/registries.yaml /vagrant_shared + SHELL + +agent_script = <<-SHELL + sudo -i + apk add curl + export K3S_TOKEN_FILE=/vagrant_shared/token + export K3S_URL=https://#{server_ip}:6443 + export INSTALL_K3S_EXEC="--flannel-iface=eth1" + mkdir -p /etc/rancher/k3s + cat <<-'EOF' > /etc/rancher/k3s/registries.yaml +mirrors: + "multi:5000": + endpoint: + - "http://#{server_ip}:5000" +EOF + curl -sfL https://get.k3s.io | sh - + SHELL + +def config_vm(name, ip, script, vm) + # The network_script has two objectives: + # 1. Ensure that the guest's default route is the bridged network (bypass the network of the host) + # 2. Ensure that the DNS points to an external DNS service, as opposed to the DNS of the host that + # the NAT network provides. 
+ network_script = <<-SHELL + sudo -i + ip route delete default 2>&1 >/dev/null || true; ip route add default via #{$default_route} + cp /etc/resolv.conf /etc/resolv.conf.orig + sed 's/^nameserver.*/nameserver #{$nameserver}/' /etc/resolv.conf.orig > /etc/resolv.conf + SHELL + + vm.hostname = name + vm.network 'public_network', bridge: $bridge, ip: ip + vm.synced_folder './shared', '/vagrant_shared' + vm.provider 'virtualbox' do |vb| + vb.memory = '4096' + vb.cpus = '2' + end + vm.provision 'shell', inline: script + vm.provision 'shell', inline: network_script, run: 'always' +end + +Vagrant.configure('2') do |config| + config.vm.box = 'generic/alpine314' + + config.vm.define 'server', primary: true do |server| + config_vm(server_name, server_ip, server_script, server.vm) + end + + agents.each do |agent_name, agent_ip| + config.vm.define agent_name do |agent| + config_vm(agent_name, agent_ip, agent_script, agent.vm) + end + end +end +``` + +The Kubernetes manifest to add the registry: + +#### registry.yaml +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-registry-v0 + namespace: kube-system + labels: + k8s-app: kube-registry + version: v0 +spec: + replicas: 1 + selector: + app: kube-registry + version: v0 + template: + metadata: + labels: + app: kube-registry + version: v0 + spec: + containers: + - name: registry + image: registry:2 + resources: + limits: + cpu: 100m + memory: 200Mi + env: + - name: REGISTRY_HTTP_ADDR + value: :5000 + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: /var/lib/registry + volumeMounts: + - name: image-store + mountPath: /var/lib/registry + ports: + - containerPort: 5000 + name: registry + protocol: TCP + volumes: + - name: image-store + hostPath: + path: /var/lib/registry-storage +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-registry + namespace: kube-system + labels: + app: kube-registry + kubernetes.io/name: "KubeRegistry" +spec: + selector: + app: kube-registry + ports: + - name: registry + port: 5000 + targetPort: 5000 + protocol: TCP + type: LoadBalancer +``` + diff --git a/docs/v2.4/howtos/intercepts.md b/docs/v2.19/howtos/intercepts.md similarity index 73% rename from docs/v2.4/howtos/intercepts.md rename to docs/v2.19/howtos/intercepts.md index ea088950..a60432f8 100644 --- a/docs/v2.4/howtos/intercepts.md +++ b/docs/v2.19/howtos/intercepts.md @@ -4,27 +4,23 @@ description: "Start using Telepresence in your own environment. Follow these ste import Alert from '@material-ui/lab/Alert'; import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' # Intercept a service in your own environment Telepresence enables you to create intercepts to a target Kubernetes workload. Once you have created and intercept, you can code and debug your associated service locally. -For a detailed walk-though on creating intercepts using our sample app, follow the [quick start guide](../../quick-start/demo-node/). - ## Prerequisites -Before you begin, you need to have [Telepresence installed](<../../install/), and either the Kubernetes command-line tool, [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), or the OpenShift Container Platform command-line interface, [`oc`](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). This document uses kubectl in all example commands. 
OpenShift users can substitute oc [commands instead](https://docs.openshift.com/container-platform/4.1/cli_reference/developer-cli-commands.html). +Before you begin, you need to have [Telepresence installed](../install/client.md), and either the Kubernetes command-line tool, [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), or the OpenShift Container Platform command-line interface, [`oc`](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). This document uses kubectl in all example commands. OpenShift users can substitute oc [commands instead](https://docs.openshift.com/container-platform/4.1/cli_reference/developer-cli-commands.html). This guide assumes you have a Kubernetes deployment and service accessible publicly by an ingress controller, and that you can run a copy of that service on your laptop. +## Intercept your service -## Intercept your service with a global intercept - -With Telepresence, you can create [global intercepts](../../concepts/intercepts/?intercept=global) that intercept all traffic going to a service in your cluster and route it to your local environment instead. +With Telepresence, you can create an intercept that intercepts all traffic going to a service in your cluster and route it to your local environment instead. -1. Connect to your cluster with `telepresence connect` and connect to the Kubernetes API server: +1. Connect to your cluster with `telepresence connect` and try to curl to the Kubernetes API server: ```console $ curl -ik https://kubernetes.default @@ -32,18 +28,14 @@ With Telepresence, you can create [global intercepts](../../concepts/intercepts/ Cache-Control: no-cache, private Content-Type: application/json ... - ``` - The 401 response is expected when you first connect. + The 401 response is expected. You now have access to your remote Kubernetes API server as if you were on the same network. You can now use any local tools to connect to any service in the cluster. - If you have difficulties connecting, make sure you are using Telepresence 2.0.3 or a later version. Check your version by entering `telepresence version` and [upgrade if needed](../../install/upgrade/). - - 2. Enter `telepresence list` and make sure the service you want to intercept is listed. For example: ```console @@ -88,7 +80,7 @@ With Telepresence, you can create [global intercepts](../../concepts/intercepts/ 5. Start your local environment using the environment variables retrieved in the previous step. The following are some examples of how to pass the environment variables to your local process: - * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file). + * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#env). * **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. * **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). 
diff --git a/docs/v2.19/howtos/large-clusters.md b/docs/v2.19/howtos/large-clusters.md new file mode 100644 index 00000000..d78928b0 --- /dev/null +++ b/docs/v2.19/howtos/large-clusters.md @@ -0,0 +1,47 @@ +--- +title: "Working with large clusters" +description: "Use Telepresence to intercept services in clusters with a large number of namespaces and workloads." +--- +# Working with large clusters + +## Large number of namespaces + +### The problem +When telepresence connects to a cluster, it will configure the local DNS server so that each namespace in the cluster can be used as a top-level domain (TLD). E.g. if the cluster contains the namespace "example", then a curl for the name "my_service.example" will be directed to Telepresence DNS server, because it has announced that it wants to resolve the "example" domain. + +Telepresence tries to be conservative about what namespaces that it will create TLDs for, and first check if the namespace is accessible by the user. This check can be time-consuming in a cluster with a large number of namespaces, because each check will typically take up to a second to complete, which means that for a cluster with 120 namespaces, this check can take two minutes. That's a long time to wait when doing `telepresence connect`. + +### How to solve it + +#### Limiting at connect + +The `telepresence connect` command will accept the flag `--mapped-namespaces `, which will limit the names that Telepresence create TLDs for in the DNS resolver. This may drastically decrease the time it takes to connect, and also improve the DNS resolver's performance. + +#### Limiting the traffic-manager + +It is possible to limit the namespaces that the traffic-manager will care about when it is installed or upgraded by passing the Helm chart value `managerRbac.namespaces`. This will tell the manager to only consider those namespaces with respect to intercepts and DNS. A manager configured with `managerRbac.namespaces` creates an implicit `mapped-namespaces` set for all clients that connect to it. + +## Large number of pods + +### The problem + +A cluster with a large number of pods can be problematic in situations where the traffic-manager is unable to use its default behavior of retrieving the pod-subnets from the cluster nodes. The manager will then use a fallback method, which is to retrieve the IP of all pods and then use those IPs to calculate the pod-subnets. This in turn, might cause a very large number of requests to the Kubernetes API server. + +### The solution + +If it is RBAC permission limitations that prevent the traffic-manager from reading the `podCIDR` from the nodes, then adding the necessary permissions might help. But in many cases, the nodes will not have a `podCIDR` defined. The fallback for such cases is to specify the `podCIDRs` manually (and thus prevent the scan + calculation) using the Helm chart values: + +```yaml +podCIDRStrategy: environment +podCIDRs: + - + ... +``` + +## Use a Namespaced Scoped Traffic Manager + +Depending on use-case, it's sometimes beneficial to have several traffic-managers installed, each being responsible from a limited number of namespaces and prohibited from accessing other namespaces. A cluster can either have one single global traffic-manager, or one to many traffic-managers that are namespaced, but global and namespaced can never be combined. + +A client that connects to a namespaced manager will automatically be limited to those namespaces. 
+ +See [Installing a namespaced-scoped traffic-manager](../install/manager.md#installing-a-namespace-scoped-traffic-manager) for details. diff --git a/docs/v2.19/howtos/outbound.md b/docs/v2.19/howtos/outbound.md new file mode 100644 index 00000000..b7c8c5d7 --- /dev/null +++ b/docs/v2.19/howtos/outbound.md @@ -0,0 +1,92 @@ +--- +description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." +--- + +import Alert from '@material-ui/lab/Alert'; + +# Proxy outbound traffic to my cluster + +Telepresence offers other options for proxying traffic between your laptop and the cluster. This section discribes how to proxy outbound traffic and control outbound connectivity to your cluster. + +## Proxying outbound traffic + +Connecting to the cluster instead of running an intercept allows you to access cluster workloads as if your laptop was another pod in the cluster. This enables you to access other Kubernetes services using `.`. A service running on your laptop can interact with other services on the cluster by name. + +When you connect to your cluster, the background daemon on your machine runs and installs the [Traffic Manager deployment](../reference/architecture.md) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. + +1. Run `telepresence connect` and enter your password to run the daemon. + + ``` + $ telepresence connect + Launching Telepresence User Daemon + Launching Telepresence Root Daemon + Connected to context kind-dev, namespace default (https://) + ``` + +2. Run `telepresence status` to confirm connection to your cluster and that it is proxying traffic. + + ``` + $ telepresence status + OSS User Daemon: Running + Version : v2.18.0 + Executable : /usr/local/bin/telepresence + Install ID : 4b1655a6-487f-4af3-a6d3-52f1bc1d1112 + Status : Connected + Kubernetes server : https:// + Kubernetes context: kind-dev + Namespace : default + Manager namespace : ambassador + Intercepts : 0 total + OSS Root Daemon: Running + Version: v2.18.0 + DNS : + Remote IP : 127.0.0.1 + Exclude suffixes: [.com .io .net .org .ru] + Include suffixes: [] + Timeout : 8s + Subnets: (2 subnets) + - 10.96.0.0/16 + - 10.244.0.0/24 + OSS Traffic Manager: Connected + Version : v2.18.0 + Traffic Agent: docker.io/datawire/tel2:2.18.0 + ``` + +3. Access your service by name with `curl web-app.emojivoto:80`. Telepresence routes the request to the cluster, as if your laptop is actually running in the cluster. + + ``` + $ curl web-app.emojivoto:80 + + + + + Emoji Vote + ... + ``` + +If you terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer proxied from your laptop. + + ``` + $ telepresence quit + Disconnected + ``` + +When using Telepresence in this way, you need to access services with the namespace qualified DNS name (<service name>.<namespace>) before you start an intercept. After you start an intercept, only <service name> is required. + +## Controlling outbound connectivity + +### Connected Namespace + +The `telepresence connect` command will connect to the default namespace, i.e. the namespace that your +current kubernetes context is configured to use, or a namespace named "default". When connected, you can +access all services in this namespace by just using a single label name of the service. + +You can specify which namespace to connect to by using a `--namespace ` to the connect command. 
+ +### Mapped Namespaces +By default, Telepresence provides access to all Services found in all namespaces in the connected cluster. This can lead to problems if the user does not have RBAC access permissions to all namespaces. You can use the `--mapped-namespaces ` flag to control which namespaces are accessible. + +When you use the `--mapped-namespaces` flag, you need to include all namespaces containing services you want to access, as well as all namespaces that contain services related to the intercept. + +The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. +You can deactivate the intercept with `telepresence leave `. This removes unqualified name access. diff --git a/docs/v2.19/images/TP_Architecture.svg b/docs/v2.19/images/TP_Architecture.svg new file mode 100644 index 00000000..a93bdd7e --- /dev/null +++ b/docs/v2.19/images/TP_Architecture.svg @@ -0,0 +1,900 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/v2.19/images/bugfix.png b/docs/v2.19/images/bugfix.png new file mode 100644 index 00000000..7c03920b Binary files /dev/null and b/docs/v2.19/images/bugfix.png differ diff --git a/docs/v2.19/images/change.png b/docs/v2.19/images/change.png new file mode 100644 index 00000000..d271fc51 Binary files /dev/null and b/docs/v2.19/images/change.png differ diff --git a/docs/pre-release/images/container-inner-dev-loop.png b/docs/v2.19/images/container-inner-dev-loop.png similarity index 100% rename from docs/pre-release/images/container-inner-dev-loop.png rename to docs/v2.19/images/container-inner-dev-loop.png diff --git a/docs/v2.19/images/docker-header-containers.png b/docs/v2.19/images/docker-header-containers.png new file mode 100644 index 00000000..06f422a9 Binary files /dev/null and 
b/docs/v2.19/images/docker-header-containers.png differ diff --git a/docs/v2.19/images/feature.png b/docs/v2.19/images/feature.png new file mode 100644 index 00000000..525aa1f9 Binary files /dev/null and b/docs/v2.19/images/feature.png differ diff --git a/docs/pre-release/images/logo.png b/docs/v2.19/images/logo.png similarity index 100% rename from docs/pre-release/images/logo.png rename to docs/v2.19/images/logo.png diff --git a/docs/v2.19/images/security.png b/docs/v2.19/images/security.png new file mode 100644 index 00000000..37078d66 Binary files /dev/null and b/docs/v2.19/images/security.png differ diff --git a/docs/pre-release/images/split-tunnel.png b/docs/v2.19/images/split-tunnel.png similarity index 100% rename from docs/pre-release/images/split-tunnel.png rename to docs/v2.19/images/split-tunnel.png diff --git a/docs/v2.19/images/tracing.png b/docs/v2.19/images/tracing.png new file mode 100644 index 00000000..c374807e Binary files /dev/null and b/docs/v2.19/images/tracing.png differ diff --git a/docs/pre-release/images/trad-inner-dev-loop.png b/docs/v2.19/images/trad-inner-dev-loop.png similarity index 100% rename from docs/pre-release/images/trad-inner-dev-loop.png rename to docs/v2.19/images/trad-inner-dev-loop.png diff --git a/docs/pre-release/images/tunnelblick.png b/docs/v2.19/images/tunnelblick.png similarity index 100% rename from docs/pre-release/images/tunnelblick.png rename to docs/v2.19/images/tunnelblick.png diff --git a/docs/pre-release/images/vpn-dns.png b/docs/v2.19/images/vpn-dns.png similarity index 100% rename from docs/pre-release/images/vpn-dns.png rename to docs/v2.19/images/vpn-dns.png diff --git a/docs/v2.19/images/vpn-k8s-config.jpg b/docs/v2.19/images/vpn-k8s-config.jpg new file mode 100644 index 00000000..66116e41 Binary files /dev/null and b/docs/v2.19/images/vpn-k8s-config.jpg differ diff --git a/docs/v2.19/images/vpn-proxy-via.jpg b/docs/v2.19/images/vpn-proxy-via.jpg new file mode 100644 index 00000000..338e088b Binary files /dev/null and b/docs/v2.19/images/vpn-proxy-via.jpg differ diff --git a/docs/v2.19/images/vpn-routing.jpg b/docs/v2.19/images/vpn-routing.jpg new file mode 100644 index 00000000..18410dd4 Binary files /dev/null and b/docs/v2.19/images/vpn-routing.jpg differ diff --git a/docs/v2.19/images/vpn-with-tele.jpg b/docs/v2.19/images/vpn-with-tele.jpg new file mode 100644 index 00000000..843b253e Binary files /dev/null and b/docs/v2.19/images/vpn-with-tele.jpg differ diff --git a/docs/v2.19/install/client.md b/docs/v2.19/install/client.md new file mode 100644 index 00000000..d768e219 --- /dev/null +++ b/docs/v2.19/install/client.md @@ -0,0 +1,110 @@ +import Platform from '@src/components/Platform'; + +# Client Installation + +Install the Telepresence client on your workstation by running the commands below for your OS. + + + + +```shell +# Intel Macs + +# 1. Download the latest binary (~105 MB): +sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-amd64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence + +# Apple silicon Macs + +# 1. Ensure that no old binary exists. This is very important because Silicon macs track the executable's signature +# and just updating it in place will not work. +sudo rm -f /usr/local/bin/telepresence + +# 2. 
Download the latest binary (~101 MB): +sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-arm64 -o /usr/local/bin/telepresence + +# 3. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +```shell +# 1. Download the latest binary (~95 MB): +sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-linux-amd64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +We've developed a Powershell script to simplify the process of installing telepresence. Here are the commands you can execute: + +```powershell +# To install Telepresence, run the following commands +# from PowerShell as Administrator. + +# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): +Invoke-WebRequest https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-windows-amd64.zip -OutFile telepresence.zip + +# 2. Unzip the telepresence.zip file to the desired directory, then remove the zip file: +Expand-Archive -Path telepresence.zip -DestinationPath telepresenceInstaller/telepresence +Remove-Item 'telepresence.zip' +cd telepresenceInstaller/telepresence + +# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to +# C:\telepresence by default, but you can specify a custom path by passing in -Path C:\my\custom\path +powershell.exe -ExecutionPolicy bypass -c " . '.\install-telepresence.ps1';" + +# 4. Remove the unzipped directory: +cd ../.. +Remove-Item telepresenceInstaller -Recurse -Confirm:$false -Force + +# 5. Telepresence is now installed and you can use telepresence commands in PowerShell. +``` + + + + +## What's Next? + +Follow one of our [quick start guides](../quick-start.md) to start using Telepresence, either with our sample app or in your own environment. + +## Installing older versions of Telepresence + +Use these URLs to download an older version for your OS (including older nightly builds), replacing `x.y.z` with the versions you want. + + + + +```shell +# Intel Macs +https://app.getambassador.io/download/tel2oss/releases/download/vx.y.z/telepresence-darwin-amd64 + +# Apple silicon Macs +https://app.getambassador.io/download/tel2oss/releases/download/vx.y.z/telepresence-darwin-arm64 +``` + + + + +``` +https://app.getambassador.io/download/tel2oss/releases/download/vx.y.z/telepresence-linux-amd64 +``` + + + + +``` +(https://app.getambassador.io/download/tel2oss/releases/download/vx.y.z/telepresence-windows-amd64.exe +``` + + + + + diff --git a/docs/v2.19/install/cloud.md b/docs/v2.19/install/cloud.md new file mode 100644 index 00000000..1aac0855 --- /dev/null +++ b/docs/v2.19/install/cloud.md @@ -0,0 +1,55 @@ +# Provider Prerequisites for Traffic Manager + +## GKE + +### Firewall Rules for private clusters + +A GKE cluster with private networking will come preconfigured with firewall rules that prevent the Traffic Manager's +webhook injector from being invoked by the Kubernetes API server. +For Telepresence to work in such a cluster, you'll need to [add a firewall rule](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) allowing the Kubernetes masters to access TCP port `8443` in your pods. 
+For example, for a cluster named `tele-webhook-gke` in region `us-central1-c1`: + +```bash +$ gcloud container clusters describe tele-webhook-gke --region us-central1-c | grep masterIpv4CidrBlock + masterIpv4CidrBlock: 172.16.0.0/28 # Take note of the IP range, 172.16.0.0/28 + +$ gcloud compute firewall-rules list \ + --filter 'name~^gke-tele-webhook-gke' \ + --format 'table( + name, + network, + direction, + sourceRanges.list():label=SRC_RANGES, + allowed[].map().firewall_rule().list():label=ALLOW, + targetTags.list():label=TARGET_TAGS + )' + +NAME NETWORK DIRECTION SRC_RANGES ALLOW TARGET_TAGS +gke-tele-webhook-gke-33fa1791-all tele-webhook-net INGRESS 10.40.0.0/14 esp,ah,sctp,tcp,udp,icmp gke-tele-webhook-gke-33fa1791-node +gke-tele-webhook-gke-33fa1791-master tele-webhook-net INGRESS 172.16.0.0/28 tcp:10250,tcp:443 gke-tele-webhook-gke-33fa1791-node +gke-tele-webhook-gke-33fa1791-vms tele-webhook-net INGRESS 10.128.0.0/9 icmp,tcp:1-65535,udp:1-65535 gke-tele-webhook-gke-33fa1791-node +# Take note fo the TARGET_TAGS value, gke-tele-webhook-gke-33fa1791-node + +$ gcloud compute firewall-rules create gke-tele-webhook-gke-webhook \ + --action ALLOW \ + --direction INGRESS \ + --source-ranges 172.16.0.0/28 \ + --rules tcp:8443 \ + --target-tags gke-tele-webhook-gke-33fa1791-node --network tele-webhook-net +Creating firewall...⠹Created [https://www.googleapis.com/compute/v1/projects/datawire-dev/global/firewalls/gke-tele-webhook-gke-webhook]. +Creating firewall...done. +NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED +gke-tele-webhook-gke-webhook tele-webhook-net INGRESS 1000 tcp:8443 False +``` + +### GKE Authentication Plugin + +Starting with Kubernetes version 1.26 GKE will require the use of the [gke-gcloud-auth-plugin](https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke). +You will need to install this plugin to use Telepresence with Docker while using GKE. + +## EKS + +### EKS Authentication Plugin + +If you are using AWS CLI version earlier than `1.16.156` you will need to install [aws-iam-authenticator](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html). +You will need to install this plugin to use Telepresence with Docker while using EKS. \ No newline at end of file diff --git a/docs/v2.3/install/helm.md b/docs/v2.19/install/manager.md similarity index 52% rename from docs/v2.3/install/helm.md rename to docs/v2.19/install/manager.md index 61e78b4f..96325ec1 100644 --- a/docs/v2.3/install/helm.md +++ b/docs/v2.19/install/manager.md @@ -1,46 +1,52 @@ -# Install with Helm +# Install/Uninstall the Traffic Manager -[Helm](https://helm.sh) is a package manager for Kubernetes that automates the release and management of software on Kubernetes. The Telepresence Traffic Manager can be installed via a Helm chart with a few simple steps. +Telepresence uses a traffic manager to send/recieve cloud traffic to the user. Telepresence uses [Helm](https://helm.sh) under the hood to install the traffic manager in your cluster. The `telepresence` binary embeds both `helm` and a helm-chart for a traffic-manager that is of the same version as the binary. -## Before you begin +## Prerequisites -The Telepresence Helm chart is hosted by Ambassador Labs and published at `https://app.getambassador.io`. +Before you begin, you need to have [Telepresence installed](../install/client.md). 
-Start by adding this repo to your Helm client with the following command: +If you are not the administrator of your cluster, you will need [administrative RBAC permissions](../reference/rbac.md#administrating-telepresence) to install and use Telepresence in your cluster. -```shell -helm repo add datawire https://app.getambassador.io -helm repo update -``` +In addition, you may need certain prerequisites depending on your cloud provider and platform. +See the [cloud provider installation notes](../install/cloud.md) for more. -## Install with Helm +## Install the Traffic Manager -When you run the Helm chart, it installs all the components required for the Telepresence Traffic Manager. +The telepresence cli can install the traffic manager for you. The basic install will install the same version as the client used. -1. If you are installing the Telepresence Traffic Manager **for the first time on your cluster**, create the `ambassador` namespace in your cluster: +1. Install the Telepresence Traffic Manager with the following command: ```shell - kubectl create namespace ambassador + telepresence helm install ``` -2. Install the Telepresence Traffic Manager with the following command: +### Customizing the Traffic Manager. + +For details on what the Helm chart installs and what can be configured, see the Helm chart [configuration on artifacthub](https://artifacthub.io/packages/helm/datawire/telepresence). + +1. Create a values.yaml file with your config values. + +2. Run the `install` command with the `--values` flag set to the path to your values file: ```shell - helm install traffic-manager --namespace ambassador datawire/telepresence + telepresence helm install --values values.yaml + ``` + alternatively, provide values using the `--set` flag: + ```shell + telepresence helm install --set logLevel=debug ``` - -For more details on what the Helm chart installs and what can be configured, take a look at the Helm chart [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). ### Install into custom namespace -The Helm chart supports being installed into any namespace, not necessarily `ambassador`. Simply pass a different `namespace` argument to `helm install`. -For example, if you wanted to deploy the traffic manager to the `staging` namespace: +The Helm chart supports being installed into any namespace, not necessarily `ambassador`. Simply pass a different `namespace` argument to +`telepresence helm install`. For example, if you wanted to deploy the traffic manager to the `staging` namespace: -```bash -helm install traffic-manager --namespace staging datawire/telepresence +```shell +telepresence helm install traffic-manager --namespace staging datawire/telepresence ``` -Note that users of telepresence will need to configure their kubeconfig to find this installation of the traffic manager: +Note that users of Telepresence will need to configure their kubeconfig to find this installation of the Traffic Manager: ```yaml apiVersion: v1 @@ -55,7 +61,29 @@ clusters: name: example-cluster ``` -See [the kubeconfig documentation](../reference/config#manager) for more information. +See [the kubeconfig documentation](../reference/config.md#manager) for more information. + +## Upgrading/Downgrading the Traffic Manager. + +1. Download the cli of the version of Telepresence you wish to use. + +2. Run the `upgrade` command. 
Optionally with `--values` and/or `--set` flags + + ```shell + telepresence helm upgrade + ``` + You can also use the `--reuse-values` or `--reset-values` to specify if previously installed values should be reused or reset. + + +## Uninstall + +The telepresence cli can uninstall the traffic manager for you using the `telepresence helm uninstall`. + +1. Uninstall the Telepresence Traffic Manager and all the agents installed by it using the following command: + + ```shell + telepresence helm uninstall + ``` ## RBAC @@ -78,8 +106,8 @@ managerRbac: This can then be installed via: -```bash -helm install traffic-manager --namespace staging datawire/telepresence -f ./values.yaml +```shell +telepresence helm install --namespace staging -f ./values.yaml ``` **NOTE** Do not install namespace-scoped Traffic Managers and a global Traffic Manager in the same cluster, as it could have unexpected effects. @@ -92,13 +120,13 @@ It will do this by creating a ConfigMap, called `traffic-manager-claim`, in each So, for example, suppose you install one Traffic Manager to manage namespaces `dev` and `staging`, as: ```bash -helm install traffic-manager --namespace dev datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={dev,staging}' +telepresence helm install --namespace dev --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={dev,staging}' ``` You might then attempt to install another Traffic Manager to manage namespaces `staging` and `prod`: ```bash -helm install traffic-manager --namespace prod datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={staging,prod}' +telepresence helm install --namespace prod --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={staging,prod}' ``` This would fail with an error: @@ -134,32 +162,9 @@ clientRbac: - staging ``` -#### Namespace-scoped webhook - -If you wish to use the traffic-manager's [mutating webhook](../reference/cluster-config#mutating-webhook) with a namespace-scoped traffic manager, you will have to ensure that each namespace has an `app.kubernetes.io/name` label that is identical to its name: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: staging - labels: - app.kubernetes.io/name: staging -``` - -You can also use `kubectl label` to add the label to an existing namespace, e.g.: - -```shell -kubectl label namespace staging app.kubernetes.io/name=staging -``` - -This is required because the mutating webhook will use the name label to find namespaces to operate on. - -**NOTE** This labelling happens automatically in kubernetes >= 1.21. - ### Installing RBAC only -Telepresence Traffic Manager does require some [RBAC](../../refrence/rbac/) for the traffic-manager deployment itself, as well as for users. +Telepresence Traffic Manager does require some [RBAC](../reference/rbac.md) for the traffic-manager deployment itself, as well as for users. To make it easier for operators to introspect / manage RBAC separately, you can use `rbac.only=true` to only create the rbac-related objects. Additionally, you can use `clientRbac.create=true` and `managerRbac.create=true` to toggle which subset(s) of RBAC objects you wish to create. diff --git a/docs/v2.19/install/upgrade.md b/docs/v2.19/install/upgrade.md new file mode 100644 index 00000000..a0b8bbc2 --- /dev/null +++ b/docs/v2.19/install/upgrade.md @@ -0,0 +1,62 @@ +--- +description: "How to upgrade your installation of Telepresence and install previous versions." 
+--- + +# Upgrade Process +The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. + +Before upgrading your CLI, you must stop any live Telepresence processes by issuing `telepresence quit -s` (or `telepresence quit -ur` +if your current version is less than 2.8.0). + + + + +```shell +# Intel Macs + +# 1. Download the latest binary (~105 MB): +sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-amd64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence + +# Apple silicon Macs + +# 1. Remove the old binary. This is very important on Silicon macs, because they keep track of the binary signature +sudo rm /usr/local/bin/telepresence + +# 2. Download the latest binary (~101 MB): +sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-arm64 -o /usr/local/bin/telepresence + +# 3. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +```shell +# 1. Download the latest binary (~95 MB): +sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-linux-amd64 -o /usr/local/bin/telepresence + +# 2. Make the binary executable: +sudo chmod a+x /usr/local/bin/telepresence +``` + + + + +To upgrade Telepresence,[Click here to download the Telepresence binary](https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-windows-amd64.zip). + +Once you have the binary downloaded and unzipped you will need to do a few things: + +1. Rename the binary from `telepresence-windows-amd64.exe` to `telepresence.exe` +2. Move the binary to `C:\Program Files (x86)\$USER\Telepresence\` + + + + +The Telepresence CLI contains an embedded Helm chart. See [Install/Uninstall the Traffic Manager](../manager.md) if you want to also upgrade +the Traffic Manager in your cluster. + + diff --git a/docs/v2.19/licenses.md b/docs/v2.19/licenses.md new file mode 100644 index 00000000..75dc787e --- /dev/null +++ b/docs/v2.19/licenses.md @@ -0,0 +1,8 @@ +Telepresence incorporates Free and Open Source software under the following licenses: + +* [2-clause BSD license](https://opensource.org/licenses/BSD-2-Clause) +* [3-clause BSD license](https://opensource.org/licenses/BSD-3-Clause) +* [Apache License 2.0](https://opensource.org/licenses/Apache-2.0) +* [ISC license](https://opensource.org/licenses/ISC) +* [MIT license](https://opensource.org/licenses/MIT) +* [Mozilla Public License 2.0](https://opensource.org/licenses/MPL-2.0) diff --git a/docs/2.14/quick-start/index.md b/docs/v2.19/quick-start.md similarity index 65% rename from docs/2.14/quick-start/index.md rename to docs/v2.19/quick-start.md index 7395b118..684fa3c0 100644 --- a/docs/2.14/quick-start/index.md +++ b/docs/v2.19/quick-start.md @@ -4,7 +4,6 @@ description: "Start using Telepresence in your own environment. 
Follow these ste import Alert from '@material-ui/lab/Alert'; import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' # Telepresence Quickstart @@ -15,68 +14,14 @@ Telepresence is an open source tool that enables you to set up remote developmen - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), the Kubernetes command-line tool, or the OpenShift Container Platform command-line interface, [oc](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). - A Kubernetes Deployment and Service. - - - **Don’t have access to Kubernetes cluster?** Try Telepresence in a free remote Kubernetes cluster without having to mess with your production environment. [Get Started >](https://app.getambassador.io/cloud/welcome?select=developer&utm_source=telepresence&utm_medium=website&utm_campaign=quickstart). - - - -## Install Telepresence on Your Machine - -Install Telepresence by running the relevant commands below for your OS. If you are not the administrator of your cluster, you will need [administrative RBAC permissions](../reference/rbac/#administrating-telepresence) to install and use the Telepresence traffic-manager in your cluster. - - - - -```shell -# Intel Macs - -# 1. Download the latest binary (~105 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-amd64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# 1. Download the latest binary (~101 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-arm64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~95 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-linux-amd64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -Installing Telepresence on Windows is easy. Download and run this [installer](https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-windows-amd64.exe) and follow the on-screen instructions. - - - -## Install Telepresence in Your Cluster - -1. Install the traffic manager into your cluster with `telepresence helm install`. More information about installing Telepresence can be found [here](../install/manager). This will require root access on your machine. +## Install Telepresence -``` -$ telepresence helm install -... -Traffic Manager installed successfully -``` +Follow [Install Client](install/client.md) and [Install Traffic Manager](install/manager.md) instructions to install the +telepresence client on your workstation, and the traffic manager in your cluster. ## Intercept Your Service -With Telepresence, you can create [global intercepts](../concepts/intercepts/?intercept=global) that intercept all traffic going to a service in your remote cluster and route it to your local environment instead. +With Telepresence, you can create [intercepts](concepts/intercepts.md) that intercept all traffic going to a service in your remote cluster and route it to your local environment instead. 1. 
Connect to your cluster with `telepresence connect` and connect to the Kubernetes API server: @@ -172,6 +117,6 @@ Now, with Telepresence, you can: ## What’s Next? -- [Learn about the Telepresence architecture.](../reference/architecture) +- [Learn about the Telepresence architecture.](reference/architecture) \ No newline at end of file diff --git a/docs/2.14/redirects.yml b/docs/v2.19/redirects.yml similarity index 100% rename from docs/2.14/redirects.yml rename to docs/v2.19/redirects.yml diff --git a/docs/v2.19/reference/architecture.md b/docs/v2.19/reference/architecture.md new file mode 100644 index 00000000..56a5b2a1 --- /dev/null +++ b/docs/v2.19/reference/architecture.md @@ -0,0 +1,49 @@ +--- +description: "How Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop." +--- + +# Telepresence Architecture + +## Telepresence CLI + +The Telepresence CLI orchestrates the moving parts on the workstation: it starts the Telepresence Daemons and then acts +as a user-friendly interface to the Telepresence User Daemon. + +## Telepresence Daemons +Telepresence has Daemons that run on a developer's workstation and act as the main point of communication to the cluster's +network in order to communicate with the cluster and handle intercepted traffic. + +### User-Daemon +The User-Daemon coordinates the creation and deletion of intercepts by communicating with the [Traffic Manager](#traffic-manager). +All requests from and to the cluster go through this Daemon. + +### Root-Daemon +The Root-Daemon manages the networking necessary to handle traffic between the local workstation and the cluster by setting up a +[Virtual Network Device](tun-device.md) (VIF). For a detailed description of how the VIF manages traffic and why it is necessary +please refer to this blog post: +[Implementing Telepresence Networking with a TUN Device](https://blog.getambassador.io/implementing-telepresence-networking-with-a-tun-device-a23a786d51e9). + +## Traffic Manager + +The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons +on developer workstations. It is responsible for injecting the Traffic Agent sidecar into intercepted pods, proxying all +relevant inbound and outbound traffic, and tracking active intercepts. + +The Traffic-Manager is installed, either by a cluster administrator using a Helm Chart, or on demand by the Telepresence +User Daemon. When the User Daemon performs its initial connect, it first checks the cluster for the Traffic Manager +deployment, and if missing it will make an attempt to install it using its embedded Helm Chart. + +When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud +so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager +without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview +URL, it forwards the request to the ingress service specified at the Preview URL creation. + +## Traffic Agent + +The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is first started, the Traffic Agent +container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `telepresence list` +or `kubectl describe pod `. 
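For example, after an intercept has been created you can confirm that the sidecar is in place (a sketch; the `-l app=echo-server` selector is only a stand-in for whatever labels your intercepted workload actually uses):

```shell
# List active intercepts and the workloads that have an agent installed
telepresence list

# Inspect the pod; the output should show a traffic-agent container
kubectl describe pod -l app=echo-server
```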
+ +Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the +Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the +pod usually handling requests on that port. diff --git a/docs/v2.19/reference/client.md b/docs/v2.19/reference/client.md new file mode 100644 index 00000000..69356c61 --- /dev/null +++ b/docs/v2.19/reference/client.md @@ -0,0 +1,25 @@ +--- +description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." +--- + +# Client reference + +The [Telepresence CLI client](../quick-start.md) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. + +## Commands + +A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. +You can append `--help` to each command below to get even more information about its usage. + +| Command | Description | +|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | +| `status` | Shows the current connectivity status | +| `quit` | Tell Telepresence daemons to quit | +| `list` | Lists the current active intercepts | +| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port ` (use `port/UDP` to force UDP). This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000`. A special flag `--docker-run` can be used to run the local instance [in a docker container](docker-run.md). | +| `leave` | Stops an active intercept: `telepresence leave hello` | +| `loglevel` | Temporarily change the log-level of the traffic-manager, traffic-agents, and user and root daemons | +| `gather-logs` | Gather logs from traffic-manager, traffic-agents, user, and root daemons, and export them into a zip file that can be shared with others or included with a github issue. Use `--get-pod-yaml` to include the yaml for the `traffic-manager` and `traffic-agent`s. Use `--anonymize` to replace the actual pod names + namespaces used for the `traffic-manager` and pods containing `traffic-agent`s in the logs. 
| +| `version` | Show version of Telepresence CLI + Traffic-Manager (if connected) | +| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. | \ No newline at end of file diff --git a/docs/v2.19/reference/cluster-config.md b/docs/v2.19/reference/cluster-config.md new file mode 100644 index 00000000..4a5addfc --- /dev/null +++ b/docs/v2.19/reference/cluster-config.md @@ -0,0 +1,183 @@ +import Alert from '@material-ui/lab/Alert'; + +# Cluster-side configuration + +For the most part, Telepresence doesn't require any special +configuration in the cluster and can be used right away in any +cluster (as long as the user has adequate [RBAC permissions](rbac.md) +and the cluster's server version is `1.19.0` or higher). + +## Helm Chart configuration +Some cluster specific configuration can be provided when installing +or upgrading the Telepresence cluster installation using Helm. Once +installed, the Telepresence client will configure itself from values +that it receives when connecting to the Traffic manager. + +See the Helm chart [README](https://artifacthub.io/packages/helm/telepresence-oss/telepresence-oss/$version$) +for a full list of available configuration settings. + +### Values +To add configuration, create a yaml file with the configuration values and then use it executing `telepresence helm install [--upgrade] --values ` + +## Client Configuration + +It is possible for the Traffic Manager to automatically push config to all +connecting clients. To learn more about this, please see the [client config docs](config.md#global-configuration) + +## Traffic Manager Configuration + +The `trafficManager` structure of the Helm chart configures the behavior of the Telepresence traffic manager. + +## Agent Configuration + +The `agent` structure of the Helm chart configures the behavior of the Telepresence agents. + +### Image Configuration + +The `agent.image` structure contains the following values: + +| Setting | Meaning | +|------------|-----------------------------------------------------------------------------| +| `registry` | Registry used when downloading the image. Defaults to "docker.io/datawire". | +| `name` | The name of the image. Defaults to "tel2" | +| `tag` | The tag of the image. Defaults to $version$ | + +### Log level + +The `agent.LogLevel` controls the log level of the traffic-agent. See [Log Levels](config.md#log-levels) for more info. + +### Resources + +The `agent.resources` and `agent.initResources` will be used as the `resources` element when injecting traffic-agents and init-containers. + +## Mutating Webhook + +Telepresence uses a Mutating Webhook to inject the [Traffic Agent](architecture.md#traffic-agent) sidecar container and update the +port definitions. This means that an intercepted workload (Deployment, StatefulSet, ReplicaSet) will remain untouched +and in sync as far as GitOps workflows (such as ArgoCD) are concerned. + +The injection will happen on demand the first time an attempt is made to intercept the workload. 
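For example (a sketch that reuses the `your-service` Deployment from the examples below), the first intercept is what triggers the webhook, after which the pod reports a `traffic-agent` container alongside your own:

```shell
# The first intercept causes the webhook to inject the agent sidecar
telepresence intercept your-service --port 8080

# List the container names in the workload's pods; expect to see traffic-agent
kubectl get pods -l service=your-service \
  -o jsonpath='{.items[*].spec.containers[*].name}'
```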
+
+If you want to prevent the injection from ever happening, simply add the `telepresence.getambassador.io/inject-traffic-agent: disabled`
+annotation to your workload template's annotations:
+
+```diff
+ spec:
+   template:
+     metadata:
+       labels:
+         service: your-service
++      annotations:
++        telepresence.getambassador.io/inject-traffic-agent: disabled
+     spec:
+       containers:
+```
+
+### Service Name and Port Annotations
+
+Telepresence will automatically find all services and all ports that will connect to a workload and make them available
+for an intercept, but you can explicitly define that only one service and/or port can be intercepted.
+
+```diff
+ spec:
+   template:
+     metadata:
+       labels:
+         service: your-service
+       annotations:
++        telepresence.getambassador.io/inject-service-name: my-service
++        telepresence.getambassador.io/inject-service-port: https
+     spec:
+       containers:
+```
+
+### Ignore Certain Volume Mounts
+
+An annotation `telepresence.getambassador.io/inject-ignore-volume-mounts` can be used to make the injector ignore certain volume mounts, denoted by a comma-separated string. The specified volume mounts from the original container will not be appended to the agent sidecar container.
+
+```diff
+ spec:
+   template:
+     metadata:
+       annotations:
++        telepresence.getambassador.io/inject-ignore-volume-mounts: "foo,bar"
+     spec:
+       containers:
+```
+
+### Note on Numeric Ports
+
+If the targetPort of your intercepted service is pointing at a port number, in addition to
+injecting the Traffic Agent sidecar, Telepresence will also inject an initContainer that will
+reconfigure the pod's firewall rules to redirect traffic to the Traffic Agent.
+
+
+Note that this initContainer requires `NET_ADMIN` capabilities.
+If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector.
+
+
+For example, the following service is using a numeric port, so Telepresence would inject an initContainer into it:
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: your-service
+spec:
+  type: ClusterIP
+  selector:
+    service: your-service
+  ports:
+    - port: 80
+      targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: your-service
+  labels:
+    service: your-service
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      service: your-service
+  template:
+    metadata:
+      annotations:
+        telepresence.getambassador.io/inject-traffic-agent: enabled
+      labels:
+        service: your-service
+    spec:
+      containers:
+        - name: your-container
+          image: jmalloc/echo-server
+          ports:
+            - containerPort: 8080
+```
+
+## Excluding Environment Variables
+
+If your pod contains sensitive variables like a database password or a third-party API key, you may want to exclude those from being propagated through an intercept.
+Telepresence allows you to configure this through a ConfigMap that is then read in order to remove the sensitive variables.
+
+This can be done in two ways:
+
+When installing your traffic-manager through helm you can use the `--set` flag and pass a comma-separated list of variables:
+
+`telepresence helm install --set intercept.environment.excluded="{DATABASE_PASSWORD,API_KEY}"`
+
+This also applies when upgrading:
+
+`telepresence helm upgrade --set intercept.environment.excluded="{DATABASE_PASSWORD,API_KEY}"`
+
+Once this is completed, the environment variables will no longer be in the environment file created by an intercept.
+
+The other way to do this is in your custom `values.yaml`.
Customizing your traffic-manager through a values file is described [here](../install/manager.md).
+
+```yaml
+intercept:
+  environment:
+    excluded: ['DATABASE_PASSWORD', 'API_KEY']
+```
+
+You can exclude any number of variables; they just need to match the `key` of a variable within a pod to be excluded.
diff --git a/docs/v2.19/reference/config.md b/docs/v2.19/reference/config.md
new file mode 100644
index 00000000..31140a8c
--- /dev/null
+++ b/docs/v2.19/reference/config.md
@@ -0,0 +1,326 @@
+# Laptop-side configuration
+
+There are a number of configuration values that can be tweaked to change how Telepresence behaves.
+These can be set in two ways: globally, by a platform engineer with powers to deploy the Telepresence Traffic Manager, or locally by any user.
+One important exception is the location of the traffic manager itself, which, if it's different from the default of `ambassador`, [must be set](#manager) locally per-cluster to be able to connect.
+
+## Global Configuration
+
+Global configuration is set at the Traffic Manager level and applies to any user connecting to that Traffic Manager.
+To set it, simply pass in a `client` dictionary to the `telepresence helm install` command, with any config values you wish to set.
+
+The `client` config supports values for [cluster](#cluster), [dns](#dns), [grpc](#grpc), [images](#images), [logLevels](#log-levels), [routing](#routing),
+and [timeouts](#timeouts).
+
+Here is an example configuration to show you the conventions of how Telepresence is configured:
+**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist**
+
+```yaml
+client:
+  timeouts:
+    agentInstall: 1m
+    intercept: 10s
+  logLevels:
+    userDaemon: debug
+  images:
+    registry: privateRepo # This overrides the default docker.io/datawire repo
+    agentImage: tel2:$version$ # This overrides the agent image to inject when intercepting
+  grpc:
+    maxReceiveSize: 10Mi
+  dns:
+    includeSuffixes: [.private]
+    excludeSuffixes: [.se, .com, .io, .net, .org, .ru]
+    lookupTimeout: 30s
+  routing:
+    alsoProxySubnets:
+      - 1.2.3.4/32
+    neverProxySubnets:
+      - 1.2.3.4/32
+```
+
+### Cluster
+Values for `client.cluster` control aspects of the client's connection to the traffic-manager.
+
+| Field                     | Description                                                         | Type                                        | Default            |
+|---------------------------|---------------------------------------------------------------------|---------------------------------------------|--------------------|
+| `defaultManagerNamespace` | The default namespace where the Traffic Manager will be installed.  | [string][yaml-str]                          | ambassador         |
+| `mappedNamespaces`        | Namespaces that will be mapped by default.                          | [sequence][yaml-seq] of [strings][yaml-str] | `[]`               |
+| `connectFromRootDaemon`   | Make connections to the cluster directly from the root daemon.      | [boolean][yaml-bool]                        | `true`             |
+| `agentPortForward`        | Let telepresence-client use port-forwards directly to agents        | [boolean][yaml-bool]                        | `true`             |
+| `virtualIPSubnet`         | The CIDR to use when generating virtual IPs                         | [string][yaml-str]                          | platform dependent |
+
+### DNS
+
+The `client.dns` configuration offers options for configuring the DNS resolution behavior in a client application or system. Here is a summary of the available fields:
+
+The fields for `client.dns` are: `localIP`, `excludeSuffixes`, `includeSuffixes`, `excludes`, `mappings`, and `lookupTimeout`.
+ +| Field | Description | Type | Default | +|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------|----------------------------------------------------| +| `localIP` | The address of the local DNS server. This entry is only used on Linux systems that are not configured to use systemd-resolved. | IP address [string][yaml-str] | first `nameserver` mentioned in `/etc/resolv.conf` | +| `excludeSuffixes` | Suffixes for which the DNS resolver will always fail (or fallback in case of the overriding resolver). Can be globally configured in the Helm chart. | [sequence][yaml-seq] of [strings][yaml-str] | `[".arpa", ".com", ".io", ".net", ".org", ".ru"]` | +| `includeSuffixes` | Suffixes for which the DNS resolver will always attempt to do a lookup. Includes have higher priority than excludes. Can be globally configured in the Helm chart. | [sequence][yaml-seq] of [strings][yaml-str] | `[]` | +| `excludes` | Names to be excluded by the DNS resolver | `[]` | +| `mappings` | Names to be resolved to other names (CNAME records) or to explicit IP addresses | `[]` | +| `lookupTimeout` | Maximum time to wait for a cluster side host lookup. | [duration][go-duration] [string][yaml-str] | 4 seconds | + +Here is an example values.yaml: +```yaml +client: + dns: + includeSuffixes: [.private] + excludeSuffixes: [.se, .com, .io, .net, .org, .ru] + localIP: 8.8.8.8 + lookupTimeout: 30s +``` + +#### Mappings + +Allows you to map hostnames to aliases or to IP addresses. This is useful when you want to use an alternative name for a service in the cluster, or when you want the DNS resolver to map a name to an IP address of your choice. + +In the given cluster, the service named `postgres` is located within a separate namespace titled `big-data`, and it's referred to as `psql` : + +```yaml +dns: + mappings: + - name: postgres + aliasFor: psql.big-data + - name: my.own.domain + aliasFor: 192.168.0.15 +``` + +#### Exclude + +Lists service names to be excluded from the Telepresence DNS server. This is useful when you want your application to interact with a local service instead of a cluster service. In this example, "redis" will not be resolved by the cluster, but locally. + +```yaml +dns: + excludes: + - redis +``` + +### Grpc +The `maxReceiveSize` determines how large a message that the workstation receives via gRPC can be. The default is 4Mi (determined by gRPC). All traffic to and from the cluster is tunneled via gRPC. + +The size is measured in bytes. You can express it as a plain integer or as a fixed-point number using E, G, M, or K. You can also use the power-of-two equivalents: Gi, Mi, Ki. For example, the following represent roughly the same value: +``` +128974848, 129e6, 129M, 123Mi +``` + +### Images +Values for `client.images` are strings. These values affect the objects that are deployed in the cluster, +so it's important to ensure users have the same configuration. + +These are the valid fields for the `client.images` key: + +| Field | Description | Type | Default | +|---------------|------------------------------------------------------------------------------------------|------------------------------------------------|-------------------------------------| +| `registry` | Docker registry to be used for installing the Traffic Manager and default Traffic Agent. 
| Docker registry name [string][yaml-str]        | `docker.io/datawire`                |
+| `agentImage`  | `$registry/$imageName:$imageTag` to use when installing the Traffic Agent.       | qualified Docker image name [string][yaml-str] | (unset)                             |
+| `clientImage` | `$registry/$imageName:$imageTag` to use locally when connecting with `--docker`. | qualified Docker image name [string][yaml-str] | `$registry/ambassador-telepresence` |
+
+### Intercept
+
+The `intercept` values control how Telepresence will intercept communications to the intercepted service.
+
+| Field         | Description                                                                                              | Type    | Default |
+|---------------|----------------------------------------------------------------------------------------------------------|---------|---------|
+| `defaultPort` | Controls which port is selected when no `--port` flag is given to the `telepresence intercept` command.  | int     | 8080    |
+| `useFtp`      | Use fuseftp instead of sshfs when mounting remote file systems                                           | boolean | false   |
+
+### Log Levels
+
+Values for the `client.logLevels` fields are one of the following strings,
+case-insensitive:
+
+- `trace`
+- `debug`
+- `info`
+- `warning` or `warn`
+- `error`
+
+For whichever log-level you select, you will get logs labeled with that level and of higher severity.
+(For example, if you use `info`, you will also get logs labeled `error`, but you will NOT get logs labeled `debug`.)
+
+These are the valid fields for the `client.logLevels` key:
+
+| Field        | Description                                                          | Type                                        | Default |
+|--------------|-----------------------------------------------------------------------|---------------------------------------------|---------|
+| `userDaemon` | Logging level to be used by the User Daemon (logs to connector.log)  | [loglevel][logrus-level] [string][yaml-str] | debug   |
+| `rootDaemon` | Logging level to be used for the Root Daemon (logs to daemon.log)    | [loglevel][logrus-level] [string][yaml-str] | info    |
+
+### Routing
+
+#### AlsoProxySubnets
+
+When using `alsoProxySubnets`, you provide a list of subnets to be added to the TUN device.
+All connections to addresses that the subnet spans will be dispatched to the cluster.
+
+Here is an example values.yaml for the subnet `1.2.3.4/32`:
+```yaml
+client:
+  routing:
+    alsoProxySubnets:
+      - 1.2.3.4/32
+```
+
+#### NeverProxySubnets
+
+When using `neverProxySubnets` you provide a list of subnets. These will never be routed via the TUN device,
+even if they fall within the subnets (pod or service) for the cluster. Instead, whatever route they have before
+telepresence connects is the route they will keep.
+
+Here is an example values.yaml for the subnet `1.2.3.4/32`:
+
+```yaml
+client:
+  routing:
+    neverProxySubnets:
+      - 1.2.3.4/32
+```
+
+#### Using AlsoProxy together with NeverProxy
+
+Never proxy and also proxy are implemented as routing rules, meaning that when the two conflict, regular routing rules apply.
+Usually this means that the most specific route will win.
+
+So, for example, if an `alsoProxySubnets` subnet falls within a broader `neverProxySubnets` subnet:
+
+```yaml
+neverProxySubnets: [10.0.0.0/16]
+alsoProxySubnets: [10.0.5.0/24]
+```
+
+Then the specific `alsoProxySubnets` of `10.0.5.0/24` will be proxied by the TUN device, whereas the rest of `10.0.0.0/16` will not.
+ +Conversely, if a `neverProxySubnets` subnet is inside a larger `alsoProxySubnets` subnet: + +```yaml +alsoProxySubnets: [10.0.0.0/16] +neverProxySubnets: [10.0.5.0/24] +``` + +Then all of the `alsoProxySubnets` of `10.0.0.0/16` will be proxied, with the exception of the specific `neverProxySubnets` of `10.0.5.0/24` + +### Timeouts + +Values for `client.timeouts` are all durations either as a number of seconds +or as a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings +can be fractional (`1.5h`) or combined (`2h45m`). + +These are the valid fields for the `timeouts` key: + +| Field | Description | Type | Default | +|-------------------------|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|------------| +| `agentInstall` | Waiting for Traffic Agent to be installed | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | +| `apply` | Waiting for a Kubernetes manifest to be applied | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 1 minute | +| `clusterConnect` | Waiting for cluster to be connected | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | +| `connectivityCheck` | Timeout used when checking if cluster is already proxied on the workstation | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 500 ms | +| `endpointDial` | Waiting for a Dial to a service for which the IP is known | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 3 seconds | +| `roundtripLatency` | How much to add to the endpointDial timeout when establishing a remote connection | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 seconds | +| `intercept` | Waiting for an intercept to become active | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 30 seconds | +| `proxyDial` | Waiting for an outbound connection to be established | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | +| `trafficManagerConnect` | Waiting for the Traffic Manager API to connect for port forwards | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 60 seconds | +| `trafficManagerAPI` | Waiting for connection to the gPRC API after `trafficManagerConnect` is successful | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 15 seconds | +| `helm` | Waiting for Helm operations (e.g. `install`) on the Traffic Manager | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 30 seconds | + +## Local Overrides + +In addition, it is possible to override each of these variables at the local level by setting up new values in local config files. +There are two types of config values that can be set locally: those that apply to all clusters, which are set in a single `config.yml` file, and those +that only apply to specific clusters, which are set as extensions to the `$KUBECONFIG` file. 
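To illustrate the relationship between the two levels (a sketch): the same value can be set for every user through the Helm chart, where it lives under the `client` key, or for a single workstation in `config.yml`, where the `client` wrapper is dropped:

```yaml
# values.yaml passed to `telepresence helm install --values values.yaml`
client:
  timeouts:
    intercept: 45s
```

```yaml
# config.yml on one workstation (see the per-OS paths below); takes effect only locally
timeouts:
  intercept: 45s
```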
+ +### Config for all clusters +Telepresence uses a `config.yml` file to store and change those configuration values that will be used for all clusters you use Telepresence with. +The location of this file varies based on your OS: + +* macOS: `$HOME/Library/Application Support/telepresence/config.yml` +* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` +* Windows: `%APPDATA%\telepresence\config.yml` + +For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence. + +### Values + +The config file currently supports values for the [cluster](#cluster), [grpc](#grpc), [images](#images), [logLevels](#log-levels), +and [timeouts](#timeouts) keys. +The definitions of these values are identical to those values in the `client` config above. + +Here is an example configuration to show you the conventions of how Telepresence is configured: +**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist** + +```yaml +timeouts: + agentInstall: 1m + intercept: 10s +logLevels: + userDaemon: debug +images: + registry: privateRepo # This overrides the default docker.io/datawire repo + agentImage: tel2:$version$ # This overrides the agent image to inject when intercepting +grpc: + maxReceiveSize: 10Mi +``` + + +## Workstation Per-Cluster Configuration + +Configuration that is specific to a cluster can also be overriden per-workstation by modifying your `$KUBECONFIG` file. +It is recommended that you do not do this, and instead rely on upstream values provided to the Traffic Manager. This ensures +that all users that connect to the Traffic Manager will have the same routing and DNS resolution behavior. +An important exception to this is the [`manager.namespace` configuration](#manager) which must be set locally. + +### Values + +The kubeconfig supports values for `dns`, `also-proxy`, `never-proxy`, and `manager`. + +Example kubeconfig: +```yaml +apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1 + extensions: + - name: telepresence.io + extension: + manager: + namespace: staging + dns: + include-suffixes: [.private] + exclude-suffixes: [.se, .com, .io, .net, .org, .ru] + local-ip: 8.8.8.8 + lookup-timeout: 30s + never-proxy: [10.0.0.0/16] + also-proxy: [10.0.5.0/24] + name: example-cluster +``` + +#### Manager + +This is the one cluster configuration that cannot be set using the Helm chart because it defines how Telepresence connects to +the Traffic manager. When not default, that setting needs to be configured in the workstation's kubeconfig for the cluster. + +The `manager` key contains configuration for finding the `traffic-manager` that telepresence will connect to. 
It supports one key, `namespace`, indicating the namespace where the traffic manager is to be found.
+
+Here is an example kubeconfig that will instruct telepresence to connect to a manager in namespace `staging`:
+
+```yaml
+apiVersion: v1
+clusters:
+  - cluster:
+      server: https://127.0.0.1
+      extensions:
+        - name: telepresence.io
+          extension:
+            manager:
+              namespace: staging
+    name: example-cluster
+```
+
+[yaml-bool]: https://yaml.org/type/bool.html
+[yaml-float]: https://yaml.org/type/float.html
+[yaml-int]: https://yaml.org/type/int.html
+[yaml-seq]: https://yaml.org/type/seq.html
+[yaml-str]: https://yaml.org/type/str.html
+[go-duration]: https://pkg.go.dev/time#ParseDuration
+[logrus-level]: https://github.com/sirupsen/logrus/blob/v1.8.1/logrus.go#L25-L45
diff --git a/docs/v2.3/reference/dns.md b/docs/v2.19/reference/dns.md
similarity index 66%
rename from docs/v2.3/reference/dns.md
rename to docs/v2.19/reference/dns.md
index e38fbc61..2fedcd42 100644
--- a/docs/v2.3/reference/dns.md
+++ b/docs/v2.19/reference/dns.md
@@ -6,7 +6,7 @@ All intercepts contribute to the DNS resolver, even those that do not use the `- No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `` only. Without an active intercept, the namespace qualified DNS name must be used (in the form `.`).
-See this demonstrated below, using the [quick start's](../../quick-start/) sample app services.
+See this demonstrated below, using the [quick start's](../quick-start.md) sample app services.
 No intercepts are currently running, we'll connect to the cluster and list the services that can be intercepted.
@@ -25,15 +25,6 @@ $ telepresence list $ curl web-app:80
- curl: (6) Could not resolve host: web-app
-
-```
-
-This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace.
-
-```
-$ curl web-app.emojivoto:80
-
@@ -42,8 +33,7 @@ $ curl web-app.emojivoto:80 ... ```
-Using the namespaced qualified DNS name though does work.
-Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified.
+Now we'll start an intercept against another service.
 ``` $ telepresence intercept web --port 8080
@@ -55,8 +45,7 @@ $ telepresence intercept web --port 8080 Workload kind : Deployment Destination : 127.0.0.1:8080 Volume Mount Point: /tmp/telfs-166119801
-   Intercepting      : HTTP requests that match all headers:
-       'x-telepresence-intercept-id: 8eac04e3-bf24-4d62-b3ba-35297c16f5cd:web'
+   Intercepting      : all TCP connections
 $ curl webapp:80
@@ -68,8 +57,12 @@ $ curl webapp:80 ... ```
-Now curling that service by its short name works and will as long as the intercept is active.
+The DNS resolver will also be able to resolve services using `.` regardless of what namespace the
+client is connected to.
+
+### Supported Query Types
-The DNS resolver will always be able to resolve services using `.` regardless of intercepts.
+The Telepresence DNS resolver is now capable of resolving queries of type `A`, `AAAA`, `CNAME`,
+`MX`, `NS`, `PTR`, `SRV`, and `TXT`.
-See [Outbound connectivity](../routing/#dns-resolution) for details on DNS lookups.
+See [Outbound connectivity](routing.md#dns-resolution) for details on DNS lookups.
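As a quick check (a sketch that reuses the sample app from the walkthrough above), the namespace-qualified name should resolve and respond from the connected workstation:

```
$ nslookup web-app.emojivoto
$ curl web-app.emojivoto:80
```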
diff --git a/docs/v2.19/reference/docker-run.md b/docs/v2.19/reference/docker-run.md new file mode 100644 index 00000000..fb1b7d8d --- /dev/null +++ b/docs/v2.19/reference/docker-run.md @@ -0,0 +1,87 @@ +--- +Description: "How a Telepresence intercept can run a Docker container with configured environment and volume mounts." +--- + +# Using Docker for intercepts + +## Using command flags + +### The docker flag +You can start the Telepresence daemon in a Docker container on your laptop using the command: + +```console +$ telepresence connect --docker +``` + +The `--docker` flag is a global flag, and if passed directly like `telepresence intercept --docker`, then the implicit connect that takes place if no connections is active, will use a container based daemon. + +### The docker-run flag + +If you want your intercept to go to another Docker container, you can use the `--docker-run` flag. It creates the intercept, runs your container in the foreground, then automatically ends the intercept when the container exits. + +```console +$ telepresence intercept --port --docker-run -- +``` + +The `--` separates flags intended for `telepresence intercept` from flags intended for `docker run`. + +It's recommended that you always use the `--docker-run` in combination with the global `--docker` flag, because that makes everything less intrusive. +- No admin user access is needed. Network modifications are confined to a Docker network. +- There's no need for special filesystem mount software like MacFUSE or WinFSP. The volume mounts happen in the Docker engine. + +The following happens under the hood when both flags are in use: + +- The network of for the intercept handler will be set to the same as the network used by the daemon. This guarantees that the + intercept handler can access the Telepresence VIF, and hence have access the cluster. +- Volume mounts will be automatic and made using the Telemount Docker volume plugin so that all volumes exposed by the intercepted + container are mounted on the intercept handler container. +- The environment of the intercepted container becomes the environment of the intercept handler container. + +### The docker-build flag + +The `--docker-build ` and the repeatable `docker-build-opt key=value` flags enable container's to be build on the fly by the intercept command. + +When using `--docker-build`, the image name used in the argument list must be verbatim `IMAGE`. The word acts as a placeholder and will be replaced by the ID of the image that is built. + +The `--docker-build` flag implies `--docker-run`. + +## Using docker-run flag without docker + +It is possible to use `--docker-run` with a daemon running on your host, which is the default behavior of Telepresence. + +However, it isn't recommended since you'll be in a hybrid mode: while your intercept runs in a container, the daemon will modify the host network, and if remote mounts are desired, they may require extra software. + +The ability to use this special combination is retained for backward compatibility reasons. It might be removed in a future release of Telepresence. + +The `--port` flag has slightly different semantics and can be used in situations when the local and container port must be different. This +is done using `--port :`. The container port will default to the local port when using the `--port ` syntax. + +## Examples + +Imagine you are working on a new version of your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. 
You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container. + +```console +$ telepresence intercept --docker frontend-v1 --port 8000 --docker-run -- frontend-v2 +``` + +Now, imagine that the `frontend-v2` image is built by a `Dockerfile` that resides in the directory `images/frontend-v2`. You can build and intercept directly. + +```console +$ telepresence intercept --docker frontend-v1 --port8000 --docker-build images/frontend-v2 --docker-build-opt tag=mytag -- IMAGE +``` + +## Automatic flags + +Telepresence will automatically pass some relevant flags to Docker in order to connect the container with the intercept. Those flags are combined with the arguments given after `--` on the command line. + +- `--env-file ` Loads the intercepted environment +- `--name intercept--` Names the Docker container, this flag is omitted if explicitly given on the command line +- `-v ` Volume mount specification, see CLI help for `--docker-mount` flags for more info + +When used with a container based daemon: +- `--rm` Mandatory, because the volume mounts cannot be removed until the container is removed. +- `-v :` Volume mount specifications propagated from the intercepted container + +When used with a daemon that isn't container based: +- `--dns-search tel2-search` Enables single label name lookups in intercepted namespaces +- `-p ` The local port for the intercept and the container port diff --git a/docs/v2.5/reference/environment.md b/docs/v2.19/reference/environment.md similarity index 98% rename from docs/v2.5/reference/environment.md rename to docs/v2.19/reference/environment.md index 7f83ff11..0882dea0 100644 --- a/docs/v2.5/reference/environment.md +++ b/docs/v2.19/reference/environment.md @@ -32,7 +32,7 @@ There are three options available to do this: Telepresence adds some useful environment variables in addition to the ones imported from the intercepted pod: ### TELEPRESENCE_ROOT -Directory where all remote volumes mounts are rooted. See [Volume Mounts](../volume/) for more info. +Directory where all remote volumes mounts are rooted. See [Volume Mounts](volume.md) for more info. ### TELEPRESENCE_MOUNTS Colon separated list of remotely mounted directories. diff --git a/docs/v2.19/reference/inside-container.md b/docs/v2.19/reference/inside-container.md new file mode 100644 index 00000000..59b7fdf5 --- /dev/null +++ b/docs/v2.19/reference/inside-container.md @@ -0,0 +1,11 @@ +# Running Telepresence inside a container + +The `telepresence connect` command now has the option `--docker`. This option tells telepresence to start the Telepresence daemon in a +docker container. + +Running the daemon in a container brings many advantages. The daemon will no longer make modifications to the host's network or DNS, and +it will not mount files in the host's filesystem. Consequently, it will not need admin privileges to run, nor will it need special software +like macFUSE or WinFSP to mount the remote file systems. + +The intercept handler (the process that will receive the intercepted traffic) must also be a docker container, because that is the only +way to access the cluster network that the daemon makes available, and to mount the docker volumes needed. 
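+
+A minimal end-to-end sketch of this mode, combining the flags described in the Docker reference above (the workload and image names are illustrative):
+
+```console
+# Run the daemon in a container; no host network or DNS changes, and no admin privileges needed
+$ telepresence connect --docker
+
+# Run the intercept handler as a container too; environment and volumes are wired up automatically
+$ telepresence intercept my-service --port 8080 --docker-run -- my-image:dev
+```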
diff --git a/docs/v2.19/reference/intercepts/cli.md b/docs/v2.19/reference/intercepts/cli.md new file mode 100644 index 00000000..88530427 --- /dev/null +++ b/docs/v2.19/reference/intercepts/cli.md @@ -0,0 +1,297 @@ +import Alert from '@material-ui/lab/Alert'; + +# Configuring intercept using CLI + +## Specifying a namespace for an intercept + +The namespace of the intercepted workload is specified during connect using the `--namespace` option. + +```shell +telepresence connect --namespace myns +telepresence intercept hello --port 9000 +``` + +## Importing environment variables + +Telepresence can import the environment variables from the pod that is +being intercepted, see [this doc](../environment.md) for more details. + +## Creating an intercept + +The following command will intercept all traffic bound to the service and proxy it to your +laptop. This includes traffic coming through your ingress controller, so use this option +carefully as to not disrupt production environments. + +```shell +telepresence intercept --port= +``` + +Run `telepresence status` to see the list of active intercepts. + +```console +$ telepresence status +OSS User Daemon: Running + Version : v2.18.0 + Executable : /usr/local/bin/telepresence + Install ID : 4b1658f3-7ff8-4af3-66693-f521bc1da32f + Status : Connected + Kubernetes server : https://cluster public IP> + Kubernetes context: default + Namespace : default + Manager namespace : ambassador + Intercepts : 1 total + dataprocessingnodeservice: @ +OSS Root Daemon: Running + Version: v2.18.0 + DNS : + Remote IP : 127.0.0.1 + Exclude suffixes: [.com .io .net .org .ru] + Include suffixes: [] + Timeout : 8s + Subnets: (2 subnets) + - 10.96.0.0/16 + - 10.244.0.0/24 +OSS Traffic Manager: Connected + Version : v2.19.0 + Traffic Agent: docker.io/datawire/tel2:2.18.0 +``` + +Finally, run `telepresence leave ` to stop the intercept. + +[kube-multi-port-services]: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + +```console +$ telepresence intercept --port=: +Using Deployment +intercepted + Intercept name : + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1: + Service Port Identifier: + Intercepting : all TCP connections +``` + +When intercepting a service that has multiple ports, the name of the +service port that has been intercepted is also listed. + +If you want to change which port has been intercepted, you can create +a new intercept the same way you did above and it will change which +service port is being intercepted. + +## Creating an intercept When multiple services match your workload + +Oftentimes, there's a 1-to-1 relationship between a service and a +workload, so telepresence is able to auto-detect which service it +should intercept based on the workload you are trying to intercept. +But if you use something like +[Argo](https://www.getambassador.io/docs/argo/latest/), there may be +two services (that use the same labels) to manage traffic between a +canary and a stable service. + +Fortunately, if you know which service you want to use when +intercepting a workload, you can use the `--service` flag. 
So in the +aforementioned example, if you wanted to use the `echo-stable` service +when intercepting your workload, your command would look like this: + +```console +$ telepresence intercept echo-rollout- --port --service echo-stable +Using ReplicaSet echo-rollout- +intercepted + Intercept name : echo-rollout- + State : ACTIVE + Workload kind : ReplicaSet + Destination : 127.0.0.1:3000 + Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 + Intercepting : all TCP connections +``` + +## Intercepting multiple ports + +It is possible to intercept more than one service and/or service port that are using the same workload. You do this +by creating more than one intercept that identify the same workload using the `--workload` flag. + +Let's assume that we have a service `multi-echo` with the two ports `http` and `grpc`. They are both +targeting the same `multi-echo` deployment. + +```console +$ telepresence intercept multi-echo-http --workload multi-echo --port 8080:http +Using Deployment multi-echo +intercepted + Intercept name : multi-echo-http + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1:8080 + Service Port Identifier: http + Volume Mount Point : /tmp/telfs-893700837 + Intercepting : all TCP requests +$ telepresence intercept multi-echo-grpc --workload multi-echo --port 8443:grpc --mechanism tcp +Using Deployment multi-echo +intercepted + Intercept name : multi-echo-grpc + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1:8443 + Service Port Identifier: extra + Volume Mount Point : /tmp/telfs-1277723591 + Intercepting : all TCP requests +``` + +## Port-forwarding an intercepted container's sidecars + +Sidecars are containers that sit in the same pod as an application +container; they usually provide auxiliary functionality to an +application, and can usually be reached at +`localhost:${SIDECAR_PORT}`. For example, a common use case for a +sidecar is to proxy requests to a database, your application would +connect to `localhost:${SIDECAR_PORT}`, and the sidecar would then +connect to the database, perhaps augmenting the connection with TLS or +authentication. + +When intercepting a container that uses sidecars, you might want those +sidecars' ports to be available to your local application at +`localhost:${SIDECAR_PORT}`, exactly as they would be if running +in-cluster. Telepresence's `--to-pod ${PORT}` flag implements this +behavior, adding port-forwards for the port given. + +```console +$ telepresence intercept --port=: --to-pod= +Using Deployment +intercepted + Intercept name : + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1: + Service Port Identifier: + Intercepting : all TCP connections +``` + +If there are multiple ports that you need forwarded, simply repeat the +flag (`--to-pod= --to-pod=`). + +## Intercepting headless services + +Kubernetes supports creating [services without a ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services), +which, when they have a pod selector, serve to provide a DNS record that will directly point to the service's backing pods. +Telepresence supports intercepting these `headless` services as it would a regular service with a ClusterIP. 
+So, for example, if you have the following service: + +```yaml +--- +apiVersion: v1 +kind: Service +metadata: + name: my-headless +spec: + type: ClusterIP + clusterIP: None + selector: + service: my-headless + ports: + - port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: my-headless + labels: + service: my-headless +spec: + replicas: 1 + serviceName: my-headless + selector: + matchLabels: + service: my-headless + template: + metadata: + labels: + service: my-headless + spec: + containers: + - name: my-headless + image: jmalloc/echo-server + ports: + - containerPort: 8080 + resources: {} +``` + +You can intercept it like any other: + +```console +$ telepresence intercept my-headless --port 8080 +Using StatefulSet my-headless +intercepted + Intercept name : my-headless + State : ACTIVE + Workload kind : StatefulSet + Destination : 127.0.0.1:8080 + Volume Mount Point: /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-524189712 + Intercepting : all TCP connections +``` + + +This utilizes an initContainer that requires `NET_ADMIN` capabilities. +If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. + + + +This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. +To enable running as GID 7777 on a specific openshift namespace, run: +oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE + + + +Intercepting headless services without a selector is not supported. + + +## Specifying the intercept traffic target + +By default, it's assumed that your local app is reachable on `127.0.0.1`, and intercepted traffic will be sent to that IP +at the port given by `--port`. If you wish to change this behavior and send traffic to a different IP address, you can use the `--address` parameter +to `telepresence intercept`. Say your machine is configured to respond to HTTP requests for an intercept on `172.16.0.19:8080`. You would run this as: + +```console +$ telepresence intercept my-service --address 172.16.0.19 --port 8080 +Using Deployment echo-easy + Intercept name : echo-easy + State : ACTIVE + Workload kind : Deployment + Destination : 172.16.0.19:8080 + Service Port Identifier: proxied + Volume Mount Point : /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-517018422 + Intercepting : all TCP connections +``` + +## Replacing a running workload + +By default, your application keeps running as Telepresence intercepts it, even if it doesn't receive +any traffic (or receives only a subset, as with personal intercepts). This can pose a problem for applications that are active +even when they're not receiving requests. For instance, if your application consumes from a message queue as soon as it +starts up, intercepting it won't stop the pod from consuming from the queue. + +To work around this issue, `telepresence intercept` allows you to pass in a `--replace` flag that will stop every +application container from running on your pod. When you pass in `--replace`, Telepresence will restart your application +with a dummy application container that sleeps infinitely, and instead just place a traffic agent to redirect traffic to +your local machine. The application container will be restored as soon as you leave the intercept. 
+ +```console +$ telepresence intercept my-service --port 8080 --replace + Intercept name : my-service + State : ACTIVE + Workload kind : Deployment + Destination : 127.0.0.1:8080 + Service Port Identifier: proxied + Volume Mount Point : /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-517018422 + Intercepting : all TCP connections +``` + + +Using the --replace flag implies a global intercept. This is to prevent situations +where multiple personal intercepts are consuming from the same message queue, which +would be the same as allowing the application to do so while an intercept is running. + + + +Sidecars will not be stopped. Only the container serving the intrcepted port will be removed from the pod. + diff --git a/docs/v2.19/reference/intercepts/index.md b/docs/v2.19/reference/intercepts/index.md new file mode 100644 index 00000000..d197cadc --- /dev/null +++ b/docs/v2.19/reference/intercepts/index.md @@ -0,0 +1,28 @@ +import Alert from '@material-ui/lab/Alert'; + +# Intercepts + +When intercepting a service, the Telepresence Traffic Manager ensures +that a Traffic Agent has been injected into the intercepted workload. +The injection is triggered by a Kubernetes Mutating Webhook and will +only happen once. The Traffic Agent is responsible for redirecting +intercepted traffic to the developer's workstation. + +The intercept will intercept all`tcp` and/or `udp` traffic to the +intercepted service and send all of that traffic down to the developer's +workstation. This means that an intercept will affect all users of +the intercepted service. + +## Supported workloads + +Kubernetes has various +[workloads](https://kubernetes.io/docs/concepts/workloads/). +Currently, Telepresence supports intercepting (installing a +traffic-agent on) `Deployments`, `ReplicaSets`, and `StatefulSets`. + + + +While many of our examples use Deployments, they would also work on +ReplicaSets and StatefulSets + + diff --git a/docs/v2.19/reference/monitoring.md b/docs/v2.19/reference/monitoring.md new file mode 100644 index 00000000..2498361b --- /dev/null +++ b/docs/v2.19/reference/monitoring.md @@ -0,0 +1,428 @@ +# Monitoring + +Telepresence offers powerful monitoring capabilities to help you keep a close eye on your telepresence activities and traffic manager metrics. + +## Prometheus Integration + +One of the key features of Telepresence is its seamless integration with Prometheus, which allows you to access real-time metrics and gain insights into your system's performance. With Prometheus, you can monitor various aspects of your traffic manager, including the number of active intercepts and users. Additionally, you can track consumption-related information, such as the number of intercepts used by your developers and how long they stayed connected. + +To enable Prometheus metrics for your traffic manager, follow these steps: + +1. **Configure Prometheus Port** + + First, you'll need to specify the Prometheus port by setting a new environment variable called `PROMETHEUS_PORT` for your traffic manager. You can do this by running the following command: + + ```shell + telepresence helm upgrade --set-string prometheus.port=9090 + ``` + +2. **Validate the Prometheus Exposure** + + After configuring the Prometheus port, you can validate its exposure by port-forwarding the port using Kubernetes: + + ```shell + kubectl port-forward deploy/traffic-manager 9090:9090 -n ambassador + ``` + +3. 
**Access Prometheus Dashboard** + + Once the port-forwarding is set up, you can access the Prometheus dashboard by navigating to `http://localhost:9090` in your web browser: + + Here, you will find a wealth of built-in metrics, as well as custom metrics (see below) that we have added to enhance your tracking capabilities. + + | **Name** | **Type** | **Description** | **Labels** | + |-----------------------------|----------|-------------------------------------------------------------------------------|------------------------------------------| + | `agent_count` | Gauge | Number of connected traffic agents. | | + | `client_count` | Gauge | Number of connected clients. | | + | `active_intercept_count` | Gauge | Number of active intercepts. | | + | `session_count` | Gauge | Number of sessions. | | + | `tunnel_count` | Gauge | Number of tunnels. | | + | `tunnel_ingress_bytes` | Counter | Number of bytes tunnelled from clients. | | + | `tunnel_egress_bytes` | Counter | Number of bytes tunnelled to clients. | | + | `active_http_request_count` | Gauge | Number of currently served HTTP requests. | | + | `active_grpc_request_count` | Gauge | Number of currently served gRPC requests. | | + | `connect_count` | Counter | The total number of connects by user. | `client`, `install_id` | + | `connect_active_status` | Gauge | Flag to indicate when a connect is active. 1 for active, 0 for not active. | `client`, `install_id` | + | `intercept_count` | Counter | The total number of intercepts by user. | `client`, `install_id`, `intercept_type` | + | `intercept_active_status` | Gauge | Flag to indicate when an intercept is active. 1 for active, 0 for not active. | `client`, `install_id`, `workload` | + +4. **Enable Scraping for Traffic Manager Metrics** + To ensure that these metrics are collected regularly by your Prometheus server and to maintain a historical record, it's essential to enable scraping. If you're using the default Prometheus configuration, you can achieve this by specifying specific pod annotations as follows: + + ```yaml + template: + metadata: + annotations: + prometheus.io/path: / + prometheus.io/port: "9090" + prometheus.io/scrape: "true" + ``` + + These annotations instruct Prometheus to scrape metrics from the Traffic Manager pod, allowing you to track consumption metrics and other important data over time. + +## Grafana Integration + +Grafana plays a crucial role in enhancing Telepresence's monitoring capabilities. While the step-by-step instructions for Grafana integration are not included in this documentation, you have the option to explore the integration process. By doing so, you can create visually appealing and interactive dashboards that provide deeper insights into your telepresence activities and traffic manager metrics. + +Moreover, we've developed a dedicated Grafana dashboard for your convenience. Below, you can find sample screenshots of the dashboard, and you can access the JSON model for configuration: + +**JSON Model:** + +This dashboard is designed to provide you with comprehensive monitoring and visualization tools to effectively manage your Telepresence environment. 
+ +```json +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "barchart", + "name": "Bar chart", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.5" + }, + { + "type": "panel", + "id": "piechart", + "name": "Pie chart", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 5, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "agent_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Number of connected traffic agents", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 6, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "client_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Number of connected clients", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 7, + "options": { + "colorMode": "value", + "graphMode": 
"area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "active_intercept_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Number of active intercepts", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "session_count", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Number of sessions", + "type": "stat" + }, + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30d", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Telepresence", + "uid": "d99c884a-8f4f-43f8-bd4e-bd68e47f100d", + "version": 5, + "weekStart": "" +} +``` diff --git a/docs/v2.4/reference/rbac.md b/docs/v2.19/reference/rbac.md similarity index 66% rename from docs/v2.4/reference/rbac.md rename to docs/v2.19/reference/rbac.md index 2c9af7c1..c7381238 100644 --- a/docs/v2.4/reference/rbac.md +++ b/docs/v2.19/reference/rbac.md @@ -4,7 +4,7 @@ import Alert from '@material-ui/lab/Alert'; The intention of this document is to provide a template for securing and limiting the permissions of Telepresence. This documentation covers the full extent of permissions necessary to administrate Telepresence components in a cluster. -There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described above. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [intercept](../../howtos/intercepts/), and otherwise be unable to affect Kubernetes resources. +There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described above. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [intercept](../howtos/intercepts.md), and otherwise be unable to affect Kubernetes resources. In addition to the above, there is also a consideration of how to manage Users and Groups in Kubernetes which is outside of the scope of the document. This document will use Service Accounts to assign Roles and Bindings. Other methods of RBAC administration and enforcement can be found on the [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) page. 
@@ -41,7 +41,7 @@ After creating `config.yaml` in your current directory, export the file's locati ## Administrating Telepresence -Telepresence administration requires permissions for creating `Namespaces`, `ServiceAccounts`, `ClusterRoles`, `ClusterRoleBindings`, `Secrets`, `Services`, `MutatingWebhookConfiguration`, and for creating the `traffic-manager` [deployment](../architecture/#traffic-manager) which is typically done by a full cluster administrator. The following permissions are needed for the installation and use of Telepresence: +Telepresence administration requires permissions for creating `Namespaces`, `ServiceAccounts`, `ClusterRoles`, `ClusterRoleBindings`, `Secrets`, `Services`, `MutatingWebhookConfiguration`, and for creating the `traffic-manager` [deployment](architecture.md#traffic-manager) which is typically done by a full cluster administrator. The following permissions are needed for the installation and use of Telepresence: ```yaml --- @@ -56,52 +56,41 @@ kind: ClusterRole metadata: name: telepresence-admin-role rules: - - apiGroups: - - "" + - apiGroups: [""] resources: ["pods", "pods/log"] verbs: ["get", "list", "create", "delete", "watch"] - - apiGroups: - - "" + - apiGroups: [""] resources: ["services"] verbs: ["get", "list", "update", "create", "delete"] - - apiGroups: - - "" + - apiGroups: [""] resources: ["pods/portforward"] verbs: ["create"] - - apiGroups: - - "apps" + - apiGroups: ["apps"] resources: ["deployments", "replicasets", "statefulsets"] verbs: ["get", "list", "update", "create", "delete", "watch"] - - apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] - - apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list"] - - apiGroups: - - "rbac.authorization.k8s.io" + - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterroles", "clusterrolebindings", "roles", "rolebindings"] verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: - - "" + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "delete"] + resourceNames: ["telepresence-agents"] + - apiGroups: [""] resources: ["namespaces"] verbs: ["get", "list", "watch", "create"] - - apiGroups: - - "" + - apiGroups: [""] resources: ["secrets"] verbs: ["get", "create", "list", "delete"] - - apiGroups: - - "" + - apiGroups: [""] resources: ["serviceaccounts"] verbs: ["get", "create", "delete"] - - apiGroups: - - "admissionregistration.k8s.io" + - apiGroups: ["admissionregistration.k8s.io"] resources: ["mutatingwebhookconfigurations"] verbs: ["get", "create", "delete"] - - apiGroups: - - "" + - apiGroups: [""] resources: ["nodes"] verbs: ["list", "get", "watch"] --- @@ -119,7 +108,7 @@ roleRef: kind: ClusterRole ``` -There are two ways to install the traffic-manager: Using `telepresence connect` and installing the [helm chart](../../install/helm/). +There are two ways to install the traffic-manager: Using `telepresence connect` and installing the [helm chart](../install/manager.md). By using `telepresence connect`, Telepresence will use your kubeconfig to create the objects mentioned above in the cluster if they don't already exist. If you want the most introspection into what is being installed, we recommend using the helm chart to install the traffic-manager. 
@@ -142,37 +131,19 @@ kind: ClusterRole metadata: name: telepresence-role rules: -- apiGroups: - - "" - resources: ["pods", "pods/log"] - verbs: ["get", "list", "create", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" +# For gather-logs command +- apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["list"] +# Needed in order to maintain a list of workloads +- apiGroups: ["apps"] resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "patch"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list"] -- apiGroups: - - "rbac.authorization.k8s.io" - resources: ["clusterroles", "clusterrolebindings"] verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["namespaces"] +- apiGroups: [""] + resources: ["namespaces", "services"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 @@ -189,103 +160,77 @@ roleRef: kind: ClusterRole ``` +### Traffic Manager connect permission +In addition to the cluster-wide permissions, the client will also need the following namespace scoped permissions +in the traffic-manager's namespace in order to establish the needed port-forward to the traffic-manager. +```yaml +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: traffic-manager-connect +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods/portforward"] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: traffic-manager-connect +subjects: + - name: telepresence-test-developer + kind: ServiceAccount + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + name: traffic-manager-connect + kind: Role +``` + ## Namespace only telepresence user access RBAC for multi-tenant scenarios where multiple dev teams are sharing a single cluster where users are constrained to a specific namespace(s). 
The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator +For each accessible namespace ```yaml --- apiVersion: v1 kind: ServiceAccount metadata: - name: tp-user # Update value for appropriate user name - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace + name: tp-user # Update value for appropriate user name + namespace: tp-namespace # Update value for appropriate namespace --- -kind: ClusterRole +kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: telepresence-role + namespace: tp-namespace # Should be the same as metadata.namespace of above ServiceAccount rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" +- apiGroups: [""] resources: ["services"] - verbs: ["update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" + verbs: ["get", "list", "watch"] +- apiGroups: ["apps"] resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] verbs: ["get", "list", "watch"] --- -kind: RoleBinding # RBAC to access ambassador namespace -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: t2-ambassador-binding - namespace: ambassador -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding # RoleBinding T2 namespace to be intecpeted +kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: telepresence-test-binding # Update "test" for appropriate namespace to be intercepted - namespace: test # Update "test" for appropriate namespace to be intercepted + name: telepresence-role-binding + namespace: tp-namespace # Should be the same as metadata.namespace of above ServiceAccount subjects: - kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador + name: tp-user # Should be the same as metadata.name of above ServiceAccount roleRef: - kind: ClusterRole + kind: Role name: telepresence-role apiGroup: rbac.authorization.k8s.io -​ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-role -rules: -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-binding -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-namespace-role - apiGroup: rbac.authorization.k8s.io ``` + +The user will also need the [Traffic Manager connect permission](#traffic-manager-connect-permission) described above. diff --git a/docs/v2.19/reference/routing.md b/docs/v2.19/reference/routing.md new file mode 100644 index 00000000..74220dc6 --- /dev/null +++ b/docs/v2.19/reference/routing.md @@ -0,0 +1,51 @@ +# Connection Routing + +## DNS resolution +When requesting a connection to a host, the IP of that host must be determined. 
Telepresence provides DNS resolvers to help with this task. There are currently four types of resolvers but only one of them will be used on a workstation at any given time. Common for all of them is that they will propagate a selection of the host lookups to be performed in the cluster. The selection normally includes all names ending with `.cluster.local` or a currently mapped namespace but more entries can be added to the list using the `includeSuffixes` or `mappings` option in the +[cluster DNS configuration](config.md#dns) + +### Cluster side DNS lookups +The cluster side host lookup will be performed by a traffic-agent in the connected namespace, or by the traffic-manager if no such agent exists. + +### macOS resolver +This resolver hooks into the macOS DNS system by creating files under `/etc/resolver`. Those files correspond to some domain and contain the port number of the Telepresence resolver. Telepresence creates one such file for each of the currently mapped namespaces and `include-suffixes` option. The file `telepresence.local` contains a search path that is configured based on current intercepts so that single label names can be resolved correctly. + +### Linux systemd-resolved resolver +This resolver registers itself as part of telepresence's [VIF](tun-device.md) using `systemd-resolved` and uses the DBus API to configure domains and routes that corresponds to the current set of intercepts and namespaces. + +### Linux overriding resolver +Linux systems that aren't configured with `systemd-resolved` will use this resolver. A Typical case is when running Telepresence [inside a docker container](inside-container.md). During initialization, the resolver will first establish a _fallback_ connection to the IP passed as `--dns`, the one configured as `local-ip` in the [local DNS configuration](config.md#dns), or the primary `nameserver` registered in `/etc/resolv.conf`. It will then use iptables to actually override that IP so that requests to it instead end up in the overriding resolver, which unless it succeeds on its own, will use the _fallback_. + +### Windows resolver +This resolver uses the DNS resolution capabilities of the [win-tun](https://www.wintun.net/) device in conjunction with [Win32_NetworkAdapterConfiguration SetDNSDomain](https://docs.microsoft.com/en-us/powershell/scripting/samples/performing-networking-tasks?view=powershell-7.2#assigning-the-dns-domain-for-a-network-adapter). + +### DNS caching +The Telepresence DNS resolver often changes its configuration. Telepresence will not flush the host's DNS caches. Instead, all records will have a short Time To Live (TTL) so that such caches evict the entries quickly. This causes increased load on the Telepresence resolver (shorter TTL means more frequent queries) and to cater for that, telepresence now has an internal cache to minimize the number of DNS queries that it sends to the cluster. This cache is flushed as needed without causing instabilities. + +## Routing + +### Subnets +The Telepresence `traffic-manager` service is responsible for discovering the cluster's service subnet and all subnets used by the pods. In order to do this, it needs permission to create a dummy service[[2](#servicesubnet)] in its own namespace, and the ability to list, get, and watch nodes and pods. Most clusters will expose the pod subnets as `podCIDR` in the `Node` while others, like Amazon EKS, don't. Telepresence will then fall back to deriving the subnets from the IPs of all pods. 
If you'd like to choose a specific method for discovering subnets, or want to provide the list yourself, you can use the `podCIDRStrategy` configuration value in the [helm](../install/manager.md) chart to do that. + +The complete set of subnets that the [VIF](tun-device.md) will be configured with is dynamic and may change during a connection's life cycle as new nodes arrive or disappear from the cluster. The set consists of what that the traffic-manager finds in the cluster, and the subnets configured using the [also-proxy](config.md#alsoproxysubnets) configuration option. Telepresence will remove subnets that are equal to, or completely covered by, other subnets. + +### Connection origin +A request to connect to an IP-address that belongs to one of the subnets of the [VIF](tun-device.md) will cause a connection request to be made in the cluster. As with host name lookups, the request will originate from a traffic-agent in the connected namespace, of by the traffic-manager when no agent is present. + +There are multiple reasons for doing this. One is that it is important that the request originates from the correct namespace. Example: + +```bash +curl some-host +``` +results in a http request with header `Host: some-host`. Now, if a service-mesh like Istio performs header based routing, then it will fail to find that host unless the request originates from the same namespace as the host resides in. Another reason is that the configuration of a service mesh can contain very strict rules. If the request then originates from the wrong pod, it will be denied. Only one intercept at a time can be used if there is a need to ensure that the chosen pod is exactly right. + +## Recursion detection +It is common that clusters used in development, such as Minikube, Minishift or k3s, run on the same host as the Telepresence client, often in a Docker container. Such clusters may have access to host network, which means that both DNS and L4 routing may be subjected to recursion. + +### DNS recursion +When a local cluster's DNS-resolver fails to resolve a hostname, it may fall back to querying the local host network. This means that the Telepresence resolver will be asked to resolve a query that was issued from the cluster. Telepresence must check if such a query is recursive because there is a chance that it actually originated from the Telepresence DNS resolver and was dispatched to the `traffic-manager`, or a `traffic-agent`. + +Telepresence handles this by sending one initial DNS-query to resolve the hostname "tel2-recursion-check.kube-system". If the cluster runs locally, and has access to the local host's network, then that query will recurse back into the Telepresence resolver. Telepresence remembers this and alters its own behavior so that queries that are believed to be recursions are detected and respond with an NXNAME record. Telepresence performs this solution to the best of its ability, but may not be completely accurate in all situations. There's a chance that the DNS-resolver will yield a false negative for the second query if the same hostname is queried more than once in rapid succession, that is when the second query is made before the first query has received a response from the cluster. + +##### Footnotes: +

2: The error message from an attempt to create a service in a bad subnet contains the service subnet. The trick of creating a dummy service is currently the only way to get Kubernetes to expose that subnet.

diff --git a/docs/pre-release/reference/tun-device.md b/docs/v2.19/reference/tun-device.md similarity index 90% rename from docs/pre-release/reference/tun-device.md rename to docs/v2.19/reference/tun-device.md index 4410f6f3..af7e3828 100644 --- a/docs/pre-release/reference/tun-device.md +++ b/docs/v2.19/reference/tun-device.md @@ -8,7 +8,7 @@ The VIF is a TUN-device, which means that it communicates with the workstation i ## Gains when using the VIF ### Both TCP and UDP -The TUN-device is capable of routing both TCP and UDP for outbound traffic. Earlier versions of Telepresence would only allow TCP. Future enhancements might be to also route inbound UDP, and perhaps a selection of ICMP packages (to allow for things like `ping`). +The TUN-device is capable of routing both TCP and UDP traffic. ### No SSH required diff --git a/docs/v2.4/reference/volume.md b/docs/v2.19/reference/volume.md similarity index 90% rename from docs/v2.4/reference/volume.md rename to docs/v2.19/reference/volume.md index 82df9caf..aeb8055f 100644 --- a/docs/v2.4/reference/volume.md +++ b/docs/v2.19/reference/volume.md @@ -33,4 +33,6 @@ With either method, the code you run locally either from the subshell or from th For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`. -If using --mount=true without a command, you can use either environment variable flag to retrieve the variable. + +If using --mount=true without a command, you can use either environment variable flag to retrieve the variable. + diff --git a/docs/v2.19/reference/vpn.md b/docs/v2.19/reference/vpn.md new file mode 100644 index 00000000..3e7ea1e6 --- /dev/null +++ b/docs/v2.19/reference/vpn.md @@ -0,0 +1,154 @@ +import Alert from '@material-ui/lab/Alert'; + + +
+ +# Telepresence and VPNs + +It is often important to set up Kubernetes API server endpoints to be only accessible via a VPN. +In setups like these, users need to connect first to their VPN, and then use Telepresence to connect +to their cluster. As Telepresence uses many of the same underlying technologies that VPNs use, +the two can sometimes conflict. This page will help you identify and resolve such VPN conflicts. + + + +The test-vpn command, which was once part of Telepresence, became obsolete in 2.14 due to a change in functionality and was subsequently removed. + + + +## VPN Configuration + +Let's begin by reviewing what a VPN does and imagining a sample configuration that might come +to conflict with Telepresence. +Usually, a VPN client adds two kinds of routes to your machine when you connect. +The first serves to override your default route; in other words, it makes sure that packets +you send out to the public internet go through the private tunnel instead of your +ethernet or wifi adapter. We'll call this a `public VPN route`. +The second kind of route is a `private VPN route`. These are the routes that allow your +machine to access hosts inside the VPN that are not accessible to the public internet. +Generally speaking, this is a more circumscribed route that will connect your machine +only to reachable hosts on the private network, such as your Kubernetes API server. + +This diagram represents what happens when you connect to a VPN, supposing that your +private network spans the CIDR range: `10.0.0.0/8`. + +![VPN routing](../images/vpn-routing.jpg) + +## Kubernetes configuration + +One of the things a Kubernetes cluster does for you is assign IP addresses to pods and services. +This is one of the key elements of Kubernetes networking, as it allows applications on the cluster +to reach each other. When Telepresence connects you to the cluster, it will try to connect you +to the IP addresses that your cluster assigns to services and pods. +Cluster administrators can configure, on cluster creation, the CIDR ranges that the Kubernetes +cluster will place resources in. Let's imagine your cluster is configured to place services in +`10.130.0.0/16` and pods in `10.132.0.0/16`: + +![VPN Kubernetes config](../images/vpn-k8s-config.jpg) + +## Telepresence conflicts + +When you run `telepresence connect` to connect to a cluster, it talks to the API server +to figure out what pod and service CIDRs it needs to map in your machine. If it detects +that these CIDR ranges are already mapped by a VPN's `private route`, it will produce an +error and inform you of the conflicting subnets: + +```console +$ telepresence connect +telepresence connect: error: connector.Connect: failed to connect to root daemon: rpc error: code = Unknown desc = subnet 10.43.0.0/16 overlaps with existing route "10.0.0.0/8 via 10.0.0.0 dev utun4, gw 10.0.0.1" +``` + +Telepresence offers three different ways to resolve this: + +- [Allow the conflict](#allowing-the-conflict) in a controlled manner +- [Avoid the conflict](#avoiding-the-conflict) using the `--proxy-via` connect flag +- [Use docker](#using-docker) to make telepresence run in a container with its own network config + +### Allowing the conflict + +One way to resolve this, is to carefully consider what your network layout looks like, and +then allow Telepresence to override the conflicting subnets. +Telepresence is refusing to map them, because mapping them could render certain hosts that +are inside the VPN completely unreachable. 
However, you (or your network admin) know better +than anyone how hosts are spread out inside your VPN. +Even if the private route routes ALL of `10.0.0.0/8`, it's possible that hosts are only +being spun up in one of the subblocks of the `/8` space. Let's say, for example, +that you happen to know that all your hosts in the VPN are bunched up in the first +half of the space -- `10.0.0.0/9` (and that you know that any new hosts will +only be assigned IP addresses from the `/9` block). In this case you +can configure Telepresence to override the other half of this CIDR block, which is where the +services and pods happen to be. +To do this, all you have to do is configure the `client.routing.allowConflictingSubnets` flag +in the Telepresence helm chart. You can do this directly via `telepresence helm upgrade`: + +```console +$ telepresence helm upgrade --set client.routing.allowConflictingSubnets="{10.128.0.0/9}" +``` + +You can also choose to be more specific about this, and only allow the CIDRs that you KNOW +are in use by the cluster: + +```console +$ telepresence helm upgrade --set client.routing.allowConflictingSubnets="{10.130.0.0/16,10.132.0.0/16}" +``` + +The end result of this (assuming an allow list of `/9`) will be a configuration like this: + +![VPN Telepresence](../images/vpn-with-tele.jpg) + +### Avoiding the conflict + +An alternative to allowing the conflict is to remap the cluster's CIDRs to virtual CIRDs +on the workstation by passing a `--proxy-via` flag to `teleprence connect`. + +The `telepresence connect` flag `--proxy-via`, introduced in Telepresence 2.19, will allow the local DNS-server to translate cluster subnets to virtual subnets on the workstation, and the VIF to do the reverse translation. The syntax for this new flag, which can be repeated, is: + +```console +$ telepresence connect --proxy-via CIDR=WORKLOAD +``` +Cluster DNS responses matching CIDR to virtual IPs that are routed (with reverse translation) via WORKLOAD. The CIDR can also be a symbolic name that identifies a subnet or list of subnets: + +| Symbol | Meaning | +|-----------|-------------------------------------| +| `also` | All subnets added with --also-proxy | +| `service` | The cluster's service subnet | +| `pods` | The cluster's pod subnets. | +| `all` | All of the above. | + +The WORKLOAD is the deployment, replicaset, or statefulset in the cluster whose agent will be used for targeting the routed subnets. + +This is useful in two situations: + +1. The cluster's subnets collide with subnets otherwise available on the workstation. This is common when using a VPN, in particular if the VPN has a small subnet mask, making the subnet itself very large. The new `--proxy-via` flag can be used as an alternative to [allowing the conflict](#allowing-the-conflict) to take place, give Telepresence precedence, and thus hide the corresponding subnets from the conflicting subnet. The `--proxy-via` will instead reroute the cluster's subnet and hence, avoid the conflict. +2. The cluster's DNS is configured with domains that resolve to loop-back addresses (this is sometimes the case when the cluster uses a mesh configured to listen to a loopback address and then reroute from there). A loop-back address is not useful on the client, but the `--proxy-via` can reroute the loop-back address to a virtual IP that the client can use. + +Subnet proxying is done by the client's DNS-resolver which translates the IPs returned by the cluster's DNS resolver to a virtual IP (VIP) to use on the client. 
Telepresence's VIF will detect when the VIP is used, and translate it back to the loop-back address on the pod. + +#### Proxy-via and using IP-addresses directly + +If the service is using IP-addresses instead of domain-names when connecting to other cluster resources, then such connections will fail when running locally. The `--proxy-via` relies on the local DNS-server to translate the cluster's DNS responses, so that the IP of an `A` or `AAAA` response is replaced with a virtual IP from the configured subnet. If connections are made using an IP instead of a domain-name, then no such lookup is made. Telepresence has no way of detecting the direct use of IP-addresses. + +#### Virtual IP Configuration + +Telepresence will use a special subnet when it generates the virtual IPs that are used locally. On a Linux or macOS workstation, this subnet will be +a class E subnet (not normally used for any other purposes). On Windows, the class E is not routed, and Telepresence will instead default to `211.55.48.0/20`. + +The default can be changed using the configuration `cluster.virtualIPSubnet`. + +#### Example + +Let's assume that we have a conflict between the cluster's subnets, all covered by the CIDR `10.124.0.0/9` and a VPN using `10.0.0.0/9`. We avoid the conflict using: + +```console +$ telepresence connect --proxy-via all=echo +``` + +The cluster's subnets are now hidden behind a virtual subnet, and the resulting configuration will look like this: + +![VPN Telepresence](../images/vpn-proxy-via.jpg) + +### Using docker + +Use `telepresence connect --docker` to make the Telepresence daemon containerized, which means that it has its own network configuration and therefore no conflict with a VPN. Read more about docker [here](docker-run.md). + +
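+
+Returning to the virtual IP configuration described above, here is a sketch of how the default subnet could be changed, assuming `cluster.virtualIPSubnet` is set in the client's `config.yml` (the subnet value is illustrative):
+
+```yaml
+cluster:
+  virtualIPSubnet: 198.18.100.0/24   # illustrative value; the default differs per platform
+```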
diff --git a/docs/v2.19/release-notes.md b/docs/v2.19/release-notes.md new file mode 100644 index 00000000..7f40f5ff --- /dev/null +++ b/docs/v2.19/release-notes.md @@ -0,0 +1,714 @@ + +[comment]: # (Code generated by relnotesgen. DO NOT EDIT.) +# Telepresence Release Notes +## Version 2.19.1 (July 12) +##
feature
[Add brew support for the OSS version of Telepresence.](https://github.com/telepresenceio/telepresence/issues/3609)
+
+ +The Open-Source Software version of Telepresence can now be installed using the brew formula via brew install datawire/blackbird/telepresence-oss. +
+ +##
feature
Add --create-namespace flag to the telepresence helm install command.
+
+ +A --create-namespace (default true) flag was added to the telepresence helm install command. No attempt will be made to create a namespace for the traffic-manager if it is explicitly set to false. The command will then fail if the namespace is missing. +
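+
+For example, to skip namespace creation when the namespace is managed separately (a sketch, assuming the namespace already exists):
+
+```console
+$ telepresence helm install --create-namespace=false
+```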
+ +##
feature
Introduce DNS fallback on Windows.
+
+ +A network.defaultDNSWithFallback config option has been introduced on Windows. It will cause the DNS-resolver to fall back to the resolver that was first in the list prior to when Telepresence establishes a connection. The option defaults to true since it is believed to give the best experience, but it can be set to false to restore the old behavior. +
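+
+A minimal client `config.yml` sketch restoring the old behavior, assuming the dotted option name maps onto the file as shown:
+
+```yaml
+network:
+  defaultDNSWithFallback: false
+```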
+ +##
feature
[Brew now supports MacOS (amd64/arm64) / Linux (amd64)](https://github.com/datawire/homebrew-blackbird/issues/19)
+
+ +The brew formula can now dynamically support MacOS (amd64/arm64) / Linux (amd64) in a single formula +
+ +##
feature
Add ability to provide an externally-provisioned webhook secret
+
+ +Added supplied as a new option for agentInjector.certificate.method. This fully disables the generation of the Mutating Webhook's secret, allowing the chart to use the values of a pre-existing secret named agentInjector.secret.name. Previously, the install would fail when it attempted to create or update the externally-managed secret. +
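+
+A Helm values sketch using a pre-provisioned certificate; the secret name below is only a placeholder:
+
+```yaml
+agentInjector:
+  certificate:
+    method: supplied
+  secret:
+    name: my-webhook-tls   # pre-existing secret containing the webhook certificate
+```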
+ +##
feature
Let PTR query for DNS server return the cluster domain.
+
+ +The nslookup program on Windows uses a PTR query to retrieve its displayed "Server" property. The Telepresence DNS resolver will now return the cluster domain on such a query. +
+ +##
feature
Add scheduler name to PODs templates.
+
+ +A new Helm chart value schedulerName has been added. With this feature, it is possible to select a specific Kubernetes scheduler for the Telepresence resources, including the Traffic Manager and hook pods. +
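+
+A Helm values sketch; the scheduler name is hypothetical:
+
+```yaml
+schedulerName: my-custom-scheduler
+```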
+ +##
bugfix
Race in traffic-agent injector when using inject annotation
+
+ +Applying multiple deployments that used the telepresence.getambassador.io/inject-traffic-agent: enabled would cause a race condition, resulting in a large number of created pods that eventually had to be deleted, or sometimes in pods that didn't contain a traffic agent. +
+ +##
bugfix
Fix configuring custom agent security context
+
+ +The traffic-manager helm chart will now correctly use a custom agent security context if one is provided. +
+ +## Version 2.19.0 (June 15) +##
feature
Warn when an Open Source Client connects to an Enterprise Traffic Manager.
+
+ +The difference between the OSS and the Enterprise offering is not well understood, and OSS users often install a traffic-manager using the Helm chart published at getambassador.io. This Helm chart installs an enterprise traffic-manager, which is probably not what the user would expect. Telepresence will now warn when an OSS client connects to an enterprise traffic-manager and suggest either switching to an enterprise client or using telepresence helm install to install an OSS traffic-manager. +
+ +##
feature
Add scheduler name to PODs templates.
+
+ +A new Helm chart value schedulerName has been added. With this feature, it is possible to select a specific Kubernetes scheduler for the Telepresence resources, including the Traffic Manager and hook pods. +
+ +##
bugfix
Improve traffic-manager performance in very large clusters.
+
+ +The traffic-manager will now use a shared-informer when keeping track of deployments. This will significantly reduce the load on the kubelet in large clusters and therefore lessen the risk for the traffic-manager being throttled, which can lead to other problems. +
+ +##
bugfix
Kubeconfig exec authentication failure when connecting with --docker from a WSL linux host
+
+ +Clusters like Amazon EKS often use a special authentication binary that is declared in the kubeconfig using an exec authentication strategy. This binary is normally not available inside a container. Consequently, a modified kubeconfig is used when telepresence connect --docker executes, appointing a kubeauth binary which instead retrieves the authentication from a port on the Docker host that communicates with another process outside of Docker. This process then executes the original exec command to retrieve the necessary credentials.
+This setup was problematic when using WSL, because even though telepresence connect --docker was executed on a Linux host, the Docker host available from host.docker.internal that the kubeauth connected to was the Windows host running Docker Desktop. The fix for this was to use the local IP of the default route instead of host.docker.internal when running under WSL. +
+ +##
bugfix
Fix bug in workload cache, causing endless recursion when a workload uses the same name as its owner.
+
+ +The workload cache was keyed by name and namespace, but not by kind, so a workload named the same as its owner workload would be found using the same key. This led to the workload finding itself when looking up its owner, which in turn resulted in an endless recursion when searching for the topmost owner. +
+ +##
bugfix
FailedScheduling events mentioning node availability considered fatal when waiting for agent to arrive.
+
+ +The traffic-manager considers some events fatal when waiting for a traffic-agent to arrive after an injection has been initiated. This logic would trigger on events like "Warning FailedScheduling 0/63 nodes are available" and kill the wait, even though such events indicate a recoverable condition. This is now fixed so that the events are logged but the wait continues. +
+ +##
bugfix
Improve how the traffic-manager resolves DNS when no agent is installed.
+
+ +The traffic-manager is typically installed into a namespace different from the one that clients are connected to. It's therefore important that the traffic-manager adds the client's namespace when resolving single label names in situations where there aren't any agents to dispatch the DNS query to. +
+ +##
change
Removal of the ability to import legacy artifacts into Helm.
+
+ +A helm install would make attempts to find manually installed artifacts and make them managed by Helm by adding the necessary labels and annotations. This was important when the Helm chart was first introduced but is far less so today, and this legacy import was therefore removed. +
+ +##
bugfix
[Docker aliases deprecation caused failure to detect Kind cluster.](https://docs.docker.com/engine/deprecated/#container-short-id-in-network-aliases-field)
+
+ +The logic for detecting if a cluster is a local Kind cluster, and therefore needs some special attention when using telepresence connect --docker, relied on the presence of Aliases in the Docker network that a Kind cluster sets up. In Docker versions from 26 and up, this value is no longer used, but the corresponding info can instead be found in the new DNSNames field. +
+ +##
bugfix
[Include svc as a top-level domain in the DNS resolver.](https://github.com/telepresenceio/telepresence/issues/2814)
+
+ +It's not uncommon that use-cases involving Kafka or other middleware use FQNs that end with "svc". The core-DNS resolver in Kubernetes can resolve such names. With this bugfix, the Telepresence DNS resolver will also be able to resolve them, and thereby remove the need to add ".svc" to the include-suffix list. +
+ +##
feature
Add ability to enable/disable the mutating webhook.
+
+ +A new Helm chart boolean value agentInjector.enable has been added that controls the agent-injector service and its associated mutating webhook. If set to false, the service, the webhook, and the secrets and certificates associated with it, will no longer be installed. +
+ +##
feature
Add ability to mount a webhook secret.
+
+ +A new Helm chart value agentInjector.certificate.accessMethod which can be set to watch (the default) or mount has been added. The mount setting is intended for clusters with policies that prevent containers from doing a get, list or watch of a Secret, but where a latency of up to 90 seconds is acceptable between the time the secret is regenerated and the agent-injector picks it up. +
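+
+A Helm values sketch opting into the mount behavior:
+
+```yaml
+agentInjector:
+  certificate:
+    accessMethod: mount
+```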
+ +##
feature
Make it possible to specify ignored volume mounts using path prefix.
+
+ +Volume mounts like /var/run/secrets/kubernetes.io are not declared in the workload. Instead, they are injected during pod-creation and their names are generated. It is now possible to ignore such mounts using a matching path prefix. +
+ +##
feature
Make the telemount Docker Volume plugin configurable
+
+ +A telemount object was added to the intercept object in config.yml (or Helm value client.intercept), so that the automatic download and installation of this plugin can be fully customised. +
+ +##
feature
Add option to load the kubeconfig yaml from stdin during connect.
+
+ +This allows another process with a kubeconfig already loaded in memory to directly pass it to telepresence connect without needing a separate file. Simply use a dash "-" as the filename for the --kubeconfig flag. +
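+
+For example (a sketch; any process that can print a kubeconfig to stdout works the same way):
+
+```console
+$ kubectl config view --flatten | telepresence connect --kubeconfig -
+```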
+ +##
feature
Add ability to specify agent security context.
+
+ +A new Helm chart value agent.securityContext has been added that allows configuring the security context of the injected traffic agent. The value can be set to a valid Kubernetes securityContext object, or can be set to an empty value ({}) to ensure the agent has no defined security context. If no value is specified, the traffic manager will set the agent's security context to the same as that of the first container of the workload being injected into. +
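+
+A Helm values sketch; the securityContext fields shown are only examples of a valid object:
+
+```yaml
+agent:
+  securityContext:
+    runAsNonRoot: true
+    runAsUser: 1000
+```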
+ +##
change
Tracing is no longer enabled by default.
+
+ +Tracing must now be enabled explicitly in order to use the telepresence gather-traces command. +
+ +##
change
Removal of timeouts that are no longer in use
+
+ +The config.yml values timeouts.agentInstall and timeouts.apply haven't been in use since versions prior to 2.6.0, when the client was responsible for installing the traffic-agent. These timeouts are now removed from the code-base, and a warning will be printed when attempts are made to use them. +
+ +##
bugfix
Search all private subnets to find one open for dnsServerSubnet
+
+ +This resolves a bug that did not test all subnets in a private range, sometimes resulting in the warning, "DNS doesn't seem to work properly." +
+ +##
bugfix
Docker aliases deprecation caused failure to detect Kind cluster.
+
+ +The logic for detecting if a cluster is a local Kind cluster, and therefore needs some special attention when using telepresence connect --docker, relied on the presence of Aliases in the Docker network that a Kind cluster sets up. In Docker versions from 26 and up, this value is no longer used, but the corresponding info can instead be found in the new DNSNames field. +
+ +##
bugfix
Creation of individual pods was blocked by the agent-injector webhook.
+
+ +An attempt to create a pod was blocked unless it was provided by a workload. Hence, commands like kubectl run -i busybox --rm --image=curlimages/curl --restart=Never -- curl echo-easy.default would be blocked from executing. +
+ +##
bugfix
Fix panic due to root daemon not running.
+
+ +If a telepresence connect was made at a time when the root daemon was not running (an abnormal condition) and a subsequent intercept was then made, a panic would occur when the port-forward to the agent was set up. This is now fixed so that the initial telepresence connect is refused unless the root daemon is running. +
+ +##
bugfix
Get rid of telemount plugin stickiness
+
+ +The datawire/telemount plugin that is automatically downloaded and installed would never be updated once the installation was made. Telepresence will now check for the latest release of the plugin and cache the result of that check for 24 hours. If a new version arrives, it will be installed and used. +
+ +##
bugfix
Use route instead of address for CIDRs with masks that don't allow "via"
+
+ +A CIDR with a mask that leaves less than two bits (/31 or /32 for IPv4) cannot be added as an address to the VIF, because such addresses must have bits allowing a "via" IP. +The logic was modified to allow such CIDRs to become static routes, using the VIF base address as their "via", rather than being VIF addresses in their own right. +
+ +##
bugfix
Containerized daemon created cache files owned by root
+
+ +When using telepresence connect --docker to create a containerized daemon, that daemon would sometimes create files in the cache that were owned by root, which then caused problems when connecting without the --docker flag. +
+ +##
bugfix
Remove large number of requests when traffic-manager is used in large clusters.
+
+ +The traffic-manager would make a very large number of API requests during cluster start-up or when many services were changed for other reasons. The logic that did this was refactored and the number of queries was significantly reduced. +
+ +##
bugfix
Don't patch probes on replaced containers.
+
+ +A container that is being replaced by a telepresence intercept --replace invocation will have no liveness, readiness, or startup probes. Telepresence didn't take this into consideration when injecting the traffic-agent, but now it will refrain from patching symbolic port names of those probes. +
+ +##
bugfix
Don't rely on context name when deciding if a kind cluster is used.
+
+ +The code that auto-patches the kubeconfig when connecting to a kind cluster from within a docker container, relied on the context name starting with "kind-", but although all contexts created by kind have that name, the user is still free to rename it or to create other contexts using the same connection properties. The logic was therefore changed to instead look for a loopback service address. +
+ +## Version 2.18.0 (February 9) +##
feature
Include the image for the traffic-agent in the output of the version and status commands.
+
+ +The version and status commands will now output the image that the traffic-agent will be using when injected by the agent-injector. +
+ +##
feature
Custom DNS using the client DNS resolver.
+
+ +

A new telepresence connect --proxy-via CIDR=WORKLOAD flag was introduced, allowing Telepresence to translate DNS responses matching specific subnets into virtual IPs that are used locally. Those virtual IPs are then routed (with reverse translation) via the pods of a given workload. This makes it possible to handle custom DNS servers that resolve domains into loopback IPs. The flag may also be used in cases where the cluster's subnets are in conflict with the workstation's VPN.

The CIDR can also be a symbolic name that identifies a subnet or list of subnets:
| Symbol | Meaning |
|-----------|-------------------------------------|
| `also` | All subnets added with --also-proxy |
| `service` | The cluster's service subnet |
| `pods` | The cluster's pod subnets. |
| `all` | All of the above. |

+
+ +##
bugfix
Ensure that agent.appProtocolStrategy is propagated correctly.
+
+ +The agent.appProtocolStrategy was inadvertently dropped when moving license-related code from the OSS repository to the repository for the Enterprise version of Telepresence. It has now been restored. +
+ +##
bugfix
Include non-default zero values in output of telepresence config view.
+
+ +The telepresence config view command will now print zero values in the output when the default for the value is non-zero. +
+ +##
bugfix
Restore ability to run the telepresence CLI in a docker container.
+
+ +The improvements made to be able to run the telepresence daemon in docker using telepresence connect --docker made it impossible to run both the CLI and the daemon in docker. This commit fixes that and also ensures that the user- and root-daemons are merged in this scenario when the container runs as root. +
+ +##
bugfix
Remote mounts when intercepting with the --replace flag.
+
+ +A telepresence intercept --replace did not correctly mount all volumes, because when the intercepted container was removed, its mounts were no longer visible to the agent-injector when it was subjected to a second invocation. The container is now kept in place, but with an image that just sleeps infinitely. +
+ +##
bugfix
Intercepting with the --replace flag will no longer require all subsequent intercepts to use --replace.
+
+ +A telepresence intercept --replace will no longer switch the mode of the intercepted workload, forcing all subsequent intercepts on that workload to use --replace until the agent is uninstalled. Instead, --replace can be used interchangeably just like any other intercept flag. +
+ +##
bugfix
Kubeconfig exec authentication with context names containing colon didn't work on Windows
+
+ +The logic added to allow the root daemon to connect directly to the cluster using the user daemon as a proxy for exec type authentication in the kube-config, didn't take into account that a context name sometimes contains the colon ":" character. That character cannot be used in filenames on Windows because it is the drive letter separator. +
+ +##
bugfix
Provide agent name and tag as separate values in Helm chart
+
+ +The AGENT_IMAGE was a concatenation of the agent's name and tag. This is now changed so that the env instead contains an AGENT_IMAGE_NAME and AGENT_IMAGE_TAG. The AGENT_IMAGE is removed. Also, a new env REGISTRY is added, where the registry of the traffic-manager image is provided. The AGENT_REGISTRY is no longer required and will default to REGISTRY if not set. +
+ +##
bugfix
Environment interpolation expressions were prefixed twice.
+
+ +Telepresence would sometimes prefix environment interpolation expressions in the traffic-agent twice so that an expression that looked like $(SOME_NAME) in the app-container, ended up as $(_TEL_APP_A__TEL_APP_A_SOME_NAME) in the corresponding expression in the traffic-agent. +
+ +##
bugfix
Panic in root-daemon on darwin workstations with full access to cluster network.
+
+ +A darwin machine with full access to the cluster's subnets will never create a TUN-device, and a check was missing if the device actually existed, which caused a panic in the root daemon. +
+ +##
bugfix
Show allow-conflicting-subnets in telepresence status and telepresence config view.
+
+ +The telepresence status and telepresence config view commands didn't show the allowConflictingSubnets CIDRs because the value wasn't propagated correctly to the CLI. +
+ +##
feature
It is now possible to use a host-based connection and containerized connections simultaneously.
+
+ +Only one host-based connection can exist because that connection will alter the DNS to reflect the namespace of the connection, but it's now possible to create additional connections using --docker while retaining the host-based connection. +
+ +##
feature
Ability to set the hostname of a containerized daemon.
+
+ +The hostname of a containerized daemon defaults to the container's ID in Docker. You can now override the hostname using telepresence connect --docker --hostname <a name>. +
+ +##
feature
New --multi-daemon flag to enforce a consistent structure for the status command output.
+
+ +The output of the telepresence status when using --output json or --output yaml will either show an object where the user_daemon and root_daemon are top level elements, or when multiple connections are used, an object where a connections list contains objects with those daemons. The flag --multi-daemon will enforce the latter structure even when only one daemon is connected so that the output can be parsed consistently. The reason for keeping the former structure is to retain backward compatibility with existing parsers. +
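+
+For example, to always get the connection-list structure (a sketch):
+
+```console
+$ telepresence status --output json --multi-daemon
+```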
+ +##
bugfix
Make output from telepresence quit more consistent.
+
+ +A quit (without -s) just disconnects the host's user and root daemons, but will quit a container-based daemon. The message printed was simplified to remove some have/has and is/are errors caused by this difference. +
+ +##
bugfix
Fix "tls: bad certificate" errors when refreshing the mutator-webhook secret
+
+ +The agent-injector service will now refresh the secret used by the mutator-webhook each time a new connection is established, thus preventing the certificates from going out of sync when the secret is regenerated. +
+ +##
bugfix
Keep telepresence-agents configmap in sync with pod states.
+
+ +An intercept attempt that resulted in a timeout due to failure of injecting the traffic-agent left the telepresence-agents configmap in a state that indicated that an agent had been added, which caused problems for subsequent intercepts after the problem causing the first failure had been fixed. +
+ +##
bugfix
The telepresence status command will now report the status of all running daemons.
+
+ +A telepresence status, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead reports the status of all running daemons. +
+ +##
bugfix
The telepresence version command will now report the version of all running daemons.
+
+ +A telepresence version, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead reports the version of all running daemons. +
+ +##
bugfix
Multiple containerized daemons can now be disconnected using telepresence quit -s
+
+ +A telepresence quit -s, issued when multiple containerized daemons were active, would error with "multiple daemons are running, please select one using the --use <match> flag". This is now fixed so that the command instead quits all daemons. +
+ +##
bugfix
The DNS search path on Windows is now restored when Telepresence quits
+
+ +The DNS search path that Telepresence uses to simulate the DNS lookup functionality in the connected cluster namespace was not removed by a telepresence quit, resulting in connectivity problems from the workstation. Telepresence will now remove the entries that it has added to the search list when it quits. +
+ +##
bugfix
The user-daemon would sometimes get killed when used by multiple simultaneous CLI clients.
+
+ +The user-daemon would die with a fatal "fatal error: concurrent map writes" error in the connector.log, effectively killing the ongoing connection. +
+ +##
bugfix
Multiple services ports using the same target port would not get intercepted correctly.
+
+ +Intercepts didn't work when multiple service ports were using the same container port. Telepresence would think that one of the ports wasn't intercepted and therefore disable the intercept of the container port. +
+ +##
bugfix
Root daemon refuses to disconnect.
+
+ +The root daemon would sometimes hang forever when attempting to disconnect due to a deadlock in the VIF-device. +
+ +##
bugfix
Fix panic in user daemon when traffic-manager was unreachable
+
+ +The user daemon would panic if the traffic-manager was unreachable. It will now instead report a proper error to the client. +
+ +##
change
Removal of backward support for versions predating 2.6.0
+
+ +The telepresence helm installer will no longer discover and convert workloads that were modified by versions prior to 2.6.0. The traffic manager also no longer supports the muxed tunnels used in versions prior to 2.5.0. +
+ +## Version 2.17.0 (November 14) +##
feature
Additional Prometheus metrics to track intercept/connect activity
+
+ +This feature adds the following metrics to the Prometheus endpoint: connect_count, connect_active_status, intercept_count, and intercept_active_status. These are labeled by client/install_id. Additionally, the intercept_count metric has been renamed to active_intercept_count for clarity. +
+ +##
feature
Make the Telepresence client docker image configurable.
+
+ +The docker image used when running a Telepresence intercept in docker mode can now be configured using the setting images.clientImage and will default first to the value of the environment TELEPRESENCE_CLIENT_IMAGE, and then to the value preset by the telepresence binary. This configuration setting is primarily intended for testing purposes. +
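+
+A client configuration sketch; the image reference is only a placeholder:
+
+```yaml
+images:
+  clientImage: docker.io/example/telepresence-client:2.17.0
+```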
+ +##
feature
Use traffic-agent port-forwards for outbound and intercepted traffic.
+
+ +The telepresence TUN-device is now capable of establishing direct port-forwards to a traffic-agent in the connected namespace. That port-forward is then used for all outbound traffic to the device, and also for all traffic that arrives from intercepted workloads. Getting rid of the extra hop via the traffic-manager improves performance and reduces the load on the traffic-manager. The feature can only be used if the client has Kubernetes port-forward permissions to the connected namespace. It can be disabled by setting cluster.agentPortForward to false in config.yml. +
+ +##
feature
Improve outbound traffic performance.
+
+ +The root-daemon now communicates directly with the traffic-manager instead of routing all outbound traffic through the user-daemon. The root-daemon uses a patched kubeconfig where exec configurations to obtain credentials are dispatched to the user-daemon. This is to ensure that all authentication plugins will execute in user-space. The old behavior of routing everything through the user-daemon can be restored by setting cluster.connectFromRootDaemon to false in config.yml. +
+ +##
feature
New networking CLI flag --allow-conflicting-subnets
+
+ +telepresence connect (and other commands that kick off a connect) now accepts an --allow-conflicting-subnets CLI flag. This is equivalent to client.routing.allowConflictingSubnets in the helm chart, but can be specified at connect time. It will be appended to any configuration pushed from the traffic manager. +
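+
+For example (the CIDR is illustrative):
+
+```console
+$ telepresence connect --allow-conflicting-subnets 10.128.0.0/9
+```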
+ +##
change
Warn if large version mismatch between traffic manager and client.
+
+ +Print a warning if the minor version diff between the client and the traffic manager is greater than three. +
+ +##
change
The authenticator binary was removed from the docker image.
+
+ +The authenticator binary, used when serving proxied exec kubeconfig credential retrieval, has been removed. The functionality was instead added as a subcommand to the telepresence binary. +
+ +## Version 2.16.1 (October 12) +##
feature
Add --docker-debug flag to the telepresence intercept command.
+
+ +This flag is similar to --docker-build but will start the container with more relaxed security using the docker run flags --security-opt apparmor=unconfined --cap-add SYS_PTRACE. +
+ +##
feature
Add a --expose option to the telepresence connect command.
+
+ +In some situations it is necessary to make some ports available to the host from a containerized telepresence daemon. This commit adds a repeatable --expose <docker port exposure> flag to the connect command. +
+ +##
feature
Prevent agent-injector webhook from selecting from kube-xxx namespaces.
+
+ +The kube-system and kube-node-lease namespaces should not be affected by a global agent-injector webhook by default. A default namespaceSelector was therefore added to the Helm Chart agentInjector.webhook that contains a NotIn preventing those namespaces from being selected. +
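+
+The default selector has roughly this shape (a sketch of the general idea, not the exact chart contents):
+
+```yaml
+agentInjector:
+  webhook:
+    namespaceSelector:
+      matchExpressions:
+        - key: kubernetes.io/metadata.name
+          operator: NotIn
+          values:
+            - kube-system
+            - kube-node-lease
+```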
+ +##
bugfix
Backward compatibility for pod template TLS annotations.
+
+ +Users of Telepresence < 2.9.0 that make use of the pod template TLS annotations were unable to upgrade because the annotation names have changed (now prefixed by "telepresence."), and the environment expansion of the annotation values was dropped. This fix restores support for the old names (while retaining the new ones) and the environment expansion. +
+ +##
security
Built with go 1.21.3
+
+ +Built Telepresence with go 1.21.3 to address CVEs. +
+ +##
bugfix
Match service selector against pod template labels
+
+ +When listing intercepts (typically by calling telepresence list), selectors of services are matched against workloads. Previously the match was made against the labels of the workload, but now they are matched against the labels of the workload's pod template. Since the service would actually be matched against pods, this is more correct. The most common case where this makes a difference is that statefulsets are now listed when they should be. +
+ +## Version 2.16.0 (October 2) +##
bugfix
The helm sub-commands will no longer start the user daemon.
+
+ +The telepresence helm install/upgrade/uninstall commands will no longer start the telepresence user daemon because there's no need to connect to the traffic-manager in order for them to execute. +
+ +##
bugfix
Routing table race condition
+
+ +A race condition would sometimes occur when a Telepresence TUN device was deleted and another created in rapid succession that caused the routing table to reference interfaces that no longer existed. +
+ +##
bugfix
Stop lingering daemon container
+
+ +When using telepresence connect --docker, a lingering container could be present, causing errors like "The container name NN is already in use by container XX ...". When this happens, the connect logic will now give the container some time to stop and then call docker stop NN to stop it before retrying to start it. +
+ +##
bugfix
Add file locking to the Telepresence cache
+
+ +Files in the Telepresence cache are accessed by multiple processes. The processes will now use advisory locks on the files to guarantee consistency. +
+ +##
change
Lock connection to namespace
+
+ +The behavior changed so that a connected Telepresence client is bound to a namespace. The namespace can then not be changed unless the client disconnects and reconnects. A connection is also given a name. The default name is composed from <kube context name>-<namespace> but can be given explicitly when connecting using --name. The connection can optionally be identified using the option --use <name match> (only needed when docker is used and more than one connection is active). +
+ +##
change
Deprecation of global --context and --docker flags.
+
+ +The global flags --context and --docker will now be considered deprecated unless used with commands that accept the full set of Kubernetes flags (e.g. telepresence connect). +
+ +##
change
Deprecation of the --namespace flag for the intercept command.
+
+ +The --namespace flag is now deprecated for telepresence intercept command. The flag can instead be used with all commands that accept the full set of Kubernetes flags (e.g. telepresence connect). +
+ +##
change
Legacy code predating version 2.6.0 was removed.
+
+ +The telepresence code-base still contained a lot of code that would modify workloads instead of relying on the mutating webhook installer when a traffic-manager version predating version 2.6.0 was discovered. This code has now been removed. +
+ +##
feature
Add `telepresence list-namespaces` and `telepresence list-contexts` commands
+
+ +These commands can be used to check accessible namespaces and for automation. +
+ +##
change
Implicit connect warning
+
+ +A deprecation warning will be printed if a command other than telepresence connect causes an implicit connect to happen. Implicit connects will be removed in a future release. +
+ +## Version 2.15.1 (September 6) +##
security
Rebuild with go 1.21.1
+
+ +Rebuild Telepresence with go 1.21.1 to address CVEs. +
+ +##
security
Set security context for traffic agent
+
+ +Openshift users reported that the traffic agent injection was failing due to a missing security context. +
+ +## Version 2.15.0 (August 29) +##
security
Add ASLR to telepresence binaries
+
+ +ASLR hardens binary security against fixed memory attacks. +
+ +##
feature
[Added client builds for arm64 architecture.](https://github.com/telepresenceio/telepresence/issues/3259)
+
+ +Updated the release workflow files in GitHub Actions to include building and publishing the client binaries for the arm64 architecture. +
+ +##
bugfix
[KUBECONFIG env var can now be used with the docker mode.](https://github.com/telepresenceio/telepresence/pull/3300)
+
+ +If provided, the KUBECONFIG environment variable was passed to the kubeauth-foreground service as a parameter. However, since it didn't exist, the CLI was throwing an error when using telepresence connect --docker. +
+ +##
bugfix
[Fix deadlock while watching workloads](https://github.com/telepresenceio/telepresence/pull/3298)
+
+ +The telepresence list --output json-stream wasn't releasing the session's lock after being stopped, including with a telepresence quit. The user could be blocked as a result. +
+ +##
bugfix
Change json output of telepresence list command
+
+ +Replace deprecated info in the JSON output of the telepresence list command. +
+ +## Version 2.14.4 (August 21) +##
bugfix
[Nil pointer exception when upgrading the traffic-manager.](https://github.com/telepresenceio/telepresence/issues/3313)
+
+ +Upgrading the traffic-manager using telepresence helm upgrade would sometimes result in a helm error message executing "telepresence/templates/intercept-env-configmap.yaml" at <.Values.intercept.environment.excluded>: nil pointer evaluating interface {}.excluded" +
+ +## Version 2.14.2 (July 26) +##
bugfix
[Telepresence now uses the OSS agent in its latest version by default.](https://github.com/telepresenceio/telepresence/issues/3271)
+
+ +Previously, the traffic manager admin was forced to set it manually during the chart installation. +
+ +## Version 2.14.1 (July 7) +##
feature
Envoy's http idle timeout is now configurable.
+
+ +A new agent.helm.httpIdleTimeout setting was added to the Helm chart that controls the proprietary Traffic agent's http idle timeout. The default of one hour, which in some situations would cause a lot of resource-consuming and lingering connections, was changed to 70 seconds. +
+ +##
feature
Add more gauges to the Traffic manager's Prometheus client.
+
+ +Several gauges were added to the Prometheus client to make it easier to monitor what the Traffic manager spends resources on. +
+ +##
feature
Agent Pull Policy
+
+ +Add option to set traffic agent pull policy in helm chart. +
+ +##
bugfix
Resource leak in the Traffic manager.
+
+ +Fixes a resource leak in the Traffic manager caused by lingering tunnels between the clients and Traffic agents. The tunnels are now closed correctly when terminated from the side that created them. +
+ +##
bugfix
[Fixed problem setting traffic manager namespace using the kubeconfig extension.](https://www.getambassador.io/docs/telepresence/latest/reference/config#manager)
+
+ +Fixes a regression introduced in version 2.10.5, making it impossible to set the traffic-manager namespace using the telepresence.io kubeconfig extension. +
+ +## Version 2.14.0 (June 12) +##
feature
[DNS configuration now supports excludes and mappings.](https://github.com/telepresenceio/telepresence/pull/3172)
+
+ +The DNS configuration now supports two new fields, excludes and mappings. The excludes field allows you to exclude a given list of hostnames from resolution, while the mappings field can be used to map one hostname to another. +
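+
+A client DNS configuration sketch; the hostnames are placeholders and the aliasFor field name is an assumption about the mapping format:
+
+```yaml
+dns:
+  excludes:
+    - echo.blue.example.com
+  mappings:
+    - name: redis.my-domain.example
+      aliasFor: redis.default.svc.cluster.local
+```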
+ +##
feature
Added the ability to exclude environment variables
+
+ +Added a new config map that can take an array of environment variables that will then be excluded from an intercept that retrieves the environment of a pod. +
+ +##
bugfix
Fixed traffic-agent backward incompatibility issue causing lack of remote mounts
+
+ +A traffic-agent of version 2.13.3 (or 1.13.15) would not propagate the directories under /var/run/secrets when used with a traffic manager older than 2.13.3. +
+ +##
bugfix
[Fixed race condition causing segfaults on rare occasions when a tunnel stream timed out.](https://github.com/telepresenceio/telepresence/pull/2963)
+
+ +A context cancellation could sometimes be trapped in a stream reader, causing it to incorrectly return an undefined message which in turn caused the parent reader to panic on a nil pointer reference. +
+ +##
change
Routing conflict reporting.
+
+ +Telepresence will now attempt to detect and report routing conflicts with other running VPN software on client machines. There is a new configuration flag that can be tweaked to allow certain CIDRs to be overridden by Telepresence. +
+ +##
change
test-vpn command deprecated
+
+ +Running telepresence test-vpn will now print a deprecation warning and exit. The command will be removed in a future release. Instead, please configure telepresence for your VPN's routes. +
+ +## Version 2.13.3 (May 25) +##
feature
[Add imagePullSecrets to hooks](https://github.com/telepresenceio/telepresence/pull/3079)
+
+ +Add .Values.hooks.curl.imagePullSecrets and .Values.hooks curl.imagePullSecrets to Helm values. +
+ +##
change
Change reinvocation policy to IfNeeded for the mutating webhook
+
+ +The default setting of the reinvocationPolicy for the mutating webhook dealing with agent injections changed from Never to IfNeeded. +
+ +##
bugfix
[Fix mounting fail of IAM roles for service accounts web identity token](https://github.com/telepresenceio/telepresence/issues/3166)
+
+ +The eks.amazonaws.com/serviceaccount volume injected by EKS is now exported and remotely mounted during an intercept. +
+ +##
bugfix
[Correct namespace selector for cluster versions with non-numeric characters](https://github.com/telepresenceio/telepresence/pull/3184)
+
+ +The mutating webhook now correctly applies the namespace selector even if the cluster version contains non-numeric characters. For example, it can now handle versions such as Major:"1", Minor:"22+". +
+ +##
bugfix
[Enable IPv6 on the telepresence docker network](https://github.com/telepresenceio/telepresence/issues/3179)
+
+ +The "telepresence" Docker network will now propagate DNS AAAA queries to the Telepresence DNS resolver when it runs in a Docker container. +
+ +##
bugfix
[Fix the crash when intercepting with --local-only and --docker-run](https://github.com/telepresenceio/telepresence/issues/3171)
+
+ +Running telepresence intercept --local-only --docker-run no longer results in a panic. +
+ +##
bugfix
[Fix incorrect error message with local-only mounts](https://github.com/telepresenceio/telepresence/issues/3171)
+
+ +Running telepresence intercept --local-only --mount false no longer results in an incorrect error message saying "a local-only intercept cannot have mounts". +
+ +##
bugfix
[specify port in hook urls](https://github.com/telepresenceio/telepresence/pull/3161)
+
+ +The helm chart now correctly applies a custom agentInjector.webhook.port to the hook URLs; previously a custom port was not being set in those URLs. +
+ +##
bugfix
Fix wrong default value for disableGlobal and agentArrival
+
+ +Params .intercept.disableGlobal and .timeouts.agentArrival are now correctly honored. +
+ diff --git a/docs/v2.19/troubleshooting.md b/docs/v2.19/troubleshooting.md new file mode 100644 index 00000000..e20193d1 --- /dev/null +++ b/docs/v2.19/troubleshooting.md @@ -0,0 +1,253 @@ +--- +title: "Telepresence Troubleshooting" +description: "Learn how to troubleshoot common issues related to Telepresence, including intercept issues, cluster connection issues, and errors related to Ambassador Cloud." +--- +# Troubleshooting + +## Connecting to a cluster via VPN doesn't work. + +There are a few different issues that could arise when working with a VPN. Please see the [dedicated page](reference/vpn.md) on Telepresence and VPNs to learn more on how to fix these. + +## Connecting to a cluster hosted in a VM on the workstation doesn't work + +The cluster probably has access to the host's network and gets confused when it is mapped by Telepresence. +Please check the [cluster in hosted vm](howtos/cluster-in-vm.md) for more details. + +## Volume mounts are not working on macOS + +It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. Lately there's been some issues using `brew install sshfs` a macOS workstation because the required component `osxfuse` (now named `macfuse`) isn't open source and hence, no longer supported. As a workaround, you can now use `gromgit/fuse/sshfs-mac` instead. Follow these steps: + +1. Remove old sshfs, macfuse, osxfuse using `brew uninstall` +2. `brew install --cask macfuse` +3. `brew install gromgit/fuse/sshfs-mac` +4. `brew link --overwrite sshfs-mac` + +Now sshfs -V shows you the correct version, e.g.: +``` +$ sshfs -V +SSHFS version 2.10 +FUSE library version: 2.9.9 +fuse: no mount point +``` + +5. Next, try a mount (or an intercept that performs a mount). It will fail because you need to give permission to “Benjamin Fleischer” to execute a kernel extension (a pop-up appears that takes you to the system preferences). +6. Approve the needed permission +7. Reboot your computer. + +## Volume mounts are not working on Linux +It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. + +After you've installed `sshfs`, if mounts still aren't working: +1. Uncomment `user_allow_other` in `/etc/fuse.conf` +2. Add your user to the "fuse" group with: `sudo usermod -a -G fuse ` +3. Restart your computer after uncommenting `user_allow_other` + +## Distributed tracing + +Telepresence is a complex piece of software with components running locally on your laptop and remotely in a distributed kubernetes environment. +As such, troubleshooting investigations require tools that can give users, cluster admins, and maintainers a broad view of what these distributed components are doing. +In order to facilitate such investigations, telepresence >= 2.7.0 includes distributed tracing functionality via [OpenTelemetry](https://opentelemetry.io/) +Tracing is controlled via a `grpcPort` flag under the `tracing` configuration of your `values.yaml`. It is enabled by default and can be disabled by setting `grpcPort` to `0`, or `tracing` to an empty object: + +```yaml +tracing: {} +``` + +If tracing is configured, the traffic manager and traffic agents will open a GRPC server under the port given, from which telepresence clients will be able to gather trace data. 
+To collect trace data, ensure you're connected to the cluster, perform whatever operation you'd like to debug and then run `gather-traces` immediately after: + +```console +$ telepresence gather-traces +``` + +This command will gather traces from both the cloud and local components of telepresence and output them into a file called `traces.gz` in your current working directory: + +```console +$ file traces.gz + traces.gz: gzip compressed data, original size modulo 2^32 158255 +``` + +Please do not try to open or uncompress this file, as it contains binary trace data. +Instead, you can use the `upload-traces` command built into telepresence to send it to an [OpenTelemetry collector](https://opentelemetry.io/docs/collector/) for ingestion: + +```console +$ telepresence upload-traces traces.gz $OTLP_GRPC_ENDPOINT +``` + +Once that's been done, the traces will be visible via whatever means your usual collector allows. For example, this is what they look like when loaded into Jaeger's [OTLP API](https://www.jaegertracing.io/docs/1.36/apis/#opentelemetry-protocol-stable): + +![Jaeger Interface](images/tracing.png) + +**Note:** The host and port provided for the `OTLP_GRPC_ENDPOINT` must accept OTLP formatted spans (instead of e.g. Jaeger or Zipkin specific spans) via a GRPC API (instead of the HTTP API that is also available in some collectors) +**Note:** Since traces are not automatically shipped to the backend by telepresence, they are stored in memory. Hence, to avoid running telepresence components out of memory, only the last 10MB of trace data are available for export. + +### No Sidecar Injected in GKE private clusters + +An attempt to `telepresence intercept` results in a timeout, and upon examination of the pods (`kubectl get pods`) it's discovered that the intercept command did not inject a sidecar into the workload's pods: + +```bash +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +echo-easy-7f6d54cff8-rz44k 1/1 Running 0 5m5s + +$ telepresence intercept echo-easy -p 8080 +telepresence: error: connector.CreateIntercept: request timed out while waiting for agent echo-easy.default to arrive +$ kubectl get pod +NAME READY STATUS RESTARTS AGE +echo-easy-d8dc4cc7c-27567 1/1 Running 0 2m9s + +# Notice how 1/1 containers are ready. +``` + +If this is occurring in a GKE cluster with private networking enabled, it is likely due to firewall rules blocking the +Traffic Manager's webhook injector from the API server. +To fix this, add a firewall rule allowing your cluster's master nodes to access TCP port `443` in your cluster's pods, +or change the port number that Telepresence is using for the agent injector by providing the number of an allowed port +using the Helm chart value `agentInjector.webhook.port`. +Please refer to the [telepresence install instructions](install/cloud#gke) or the [GCP docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) for information to resolve this. + +## Injected init-container doesn't function properly + +The init-container is injected to insert `iptables` rules that redirects port numbers from the app container to the +traffic-agent sidecar. This is necessary when the service's `targetPort` is numeric. It requires elevated privileges +(`NET_ADMIN` capabilities), and the inserted rules may get overridden by `iptables` rules inserted by other vendors, +such as Istio or Linkerd. 
+
+Injection of the init-container can often be avoided by using a `targetPort` _name_ instead of a number, and ensuring
+that the corresponding container's `containerPort` is also named. This example uses the name "http", but any valid
+name will do:
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  ...
+spec:
+  ...
+  containers:
+    - ...
+      ports:
+        - name: http
+          containerPort: 8080
+---
+apiVersion: v1
+kind: Service
+metadata:
+  ...
+spec:
+  ...
+  ports:
+    - port: 80
+      targetPort: http
+```
+
+Telepresence's mutating webhook will refrain from injecting an init-container when the `targetPort` is a name. Instead,
+it will do the following during the injection of the traffic-agent:
+
+1. Rename the designated container's port by prefixing it (i.e., containerPort: http becomes containerPort: tm-http).
+2. Let the container port of our injected traffic-agent use the original name (i.e., containerPort: http).
+
+Kubernetes takes care of the rest and will now associate the service's `targetPort` with our traffic-agent's
+`containerPort`.
+
+### Important note
+If the service is "headless" (using `ClusterIP: None`), then using named ports won't help because the `targetPort` will
+not get remapped. A headless service will always require the init-container.
+
+## Error connecting to GKE or EKS cluster
+
+GKE and EKS require a plugin that utilizes their respective IAM providers.
+You will need to install the [gke](install/cloud#gke-authentication-plugin) or [eks](install/cloud#eks-authentication-plugin) plugins
+for Telepresence to connect to your cluster.
+
+## `too many files open` error when running `telepresence connect` on Linux
+
+If `telepresence connect` on Linux fails with a message in the logs `too many files open`, then check if `fs.inotify.max_user_instances` is set too low. Check the current settings with `sysctl fs.inotify.max_user_instances` and increase it temporarily with `sudo sysctl -w fs.inotify.max_user_instances=512`. For more information about permanently increasing it see [Kernel inotify watch limit reached](https://unix.stackexchange.com/a/13757/514457).
+
+## Connected to cluster via VPN but IPs don't resolve
+
+If `telepresence connect` succeeds, but you find yourself unable to reach services on your cluster, a routing conflict may be to blame. This frequently happens when connecting to a VPN at the same time as telepresence,
+as often VPN clients may add routes that conflict with those added by telepresence. To debug this, pick an IP address in the cluster and get its route information. In this case, we'll get the route for `100.124.150.45`, and discover
+that it's running through a `tailscale` device.
+
+
+
+```console
+$ route -n get 100.124.150.45
+ route to: 100.64.2.3
+destination: 100.64.0.0
+ mask: 255.192.0.0
+ interface: utun4
+ flags:
+ recvpipe sendpipe ssthresh rtt,msec rttvar hopcount mtu expire
+ 0 0 0 0 0 0 1280 0
+```
+
+Note that in macOS it's difficult to determine what software the name of a virtual interface corresponds to -- `utun4` doesn't indicate that it was created by tailscale.
+One option is to look at the output of `ifconfig` before and after connecting to your VPN to see if the interface in question is being added upon connection + + + + +```console +$ ip route get 100.124.150.45 +100.64.2.3 dev tailscale0 table 52 src 100.111.250.89 uid 0 +``` + + + + +```console +$ Find-NetRoute -RemoteIPAddress 100.124.150.45 + +IPAddress : 100.102.111.26 +InterfaceIndex : 29 +InterfaceAlias : Tailscale +AddressFamily : IPv4 +Type : Unicast +PrefixLength : 32 +PrefixOrigin : Manual +SuffixOrigin : Manual +AddressState : Preferred +ValidLifetime : Infinite ([TimeSpan]::MaxValue) +PreferredLifetime : Infinite ([TimeSpan]::MaxValue) +SkipAsSource : False +PolicyStore : ActiveStore + + +Caption : +Description : +ElementName : +InstanceID : ;::8;;;8 + + +This will tell you which device the traffic is being routed through. As a rule, if the traffic is not being routed by the telepresence device, +your VPN may need to be reconfigured, as its routing configuration is conflicting with telepresence. One way to determine if this is the case +is to run `telepresence quit -s`, check the route for an IP in the cluster (see commands above), run `telepresence connect`, and re-run the commands to see if the output changes. +If it doesn't change, that means telepresence is unable to override your VPN routes, and your VPN may need to be reconfigured. Talk to your network admins +to configure it such that clients do not add routes that conflict with the pod and service CIDRs of the clusters. How this will be done will +vary depending on the VPN provider. +Future versions of telepresence will be smarter about informing you of such conflicts upon connection. diff --git a/docs/v2.19/versions.yml b/docs/v2.19/versions.yml new file mode 100644 index 00000000..58e289c8 --- /dev/null +++ b/docs/v2.19/versions.yml @@ -0,0 +1,2 @@ +version: "2.19.1" +dlVersion: "v2.19.1" diff --git a/docs/v2.2/community.md b/docs/v2.2/community.md deleted file mode 100644 index 922457c9..00000000 --- a/docs/v2.2/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/v2.2/concepts/context-prop.md b/docs/v2.2/concepts/context-prop.md deleted file mode 100644 index 4ec09396..00000000 --- a/docs/v2.2/concepts/context-prop.md +++ /dev/null @@ -1,25 +0,0 @@ -# Context propagation - -**Context propagation** is the transfer of request metadata across the services and remote processes of a distributed system. Telepresence uses context propagation to intelligently route requests to the appropriate destination. - -This metadata is the context that is transferred across system services. It commonly takes the form of HTTP headers; context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -Metadata propagation refers to any service or other middleware not stripping away the headers. 
Propagation facilitates the movement of the injected contexts between other downstream services and processes. - - -## What is distributed tracing? - -Distributed tracing is a technique for troubleshooting and profiling distributed microservices applications and is a common application for context propagation. It is becoming a key component for debugging. - -In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) (or another header format, such as [B3 headers](https://github.com/openzipkin/b3-propagation)), which facilitates maintaining the headers through every service without being stripped (the propagation). - - -## What are intercepts and preview URLs? - -[Intercepts](../../reference/intercepts) and [preview URLs](../../howtos/preview-urls/) are functions of Telepresence that enable easy local development from a remote Kubernetes cluster and offer a preview environment for sharing and real-time collaboration. - -Telepresence also uses custom headers and header propagation for controllable intercepts and preview URLs instead of for tracing. The headers facilitate the smart routing of requests either to live services in the cluster or services running locally on a developer’s machine. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to [Ambassador Cloud](https://app.getambassador.io) with other information about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. diff --git a/docs/v2.2/concepts/devworkflow.md b/docs/v2.2/concepts/devworkflow.md deleted file mode 100644 index b09f186d..00000000 --- a/docs/v2.2/concepts/devworkflow.md +++ /dev/null @@ -1,7 +0,0 @@ -# The changing development workflow - -A changing workflow is one of the main challenges for developers adopting Kubernetes. Software development itself isn’t the challenge. Developers can continue to [code using the languages and tools with which they are most productive and comfortable](/resources/kubernetes-local-dev-toolkit/). That’s the beauty of containerized development. - -However, the cloud-native, Kubernetes-based approach to development means adopting a new development workflow and development environment. Beyond the basics, such as figuring out how to containerize software, [how to run containers in Kubernetes](/docs/kubernetes/latest/concepts/appdev/), and how to deploy changes into containers, for example, Kubernetes adds complexity before it delivers efficiency. 
The promise of a “quicker way to develop software” applies at least within the traditional aspects of the inner dev loop, where the single developer codes, builds and tests their software. But both within the inner dev loop and once code is pushed into version control to trigger the outer dev loop, the developer experience changes considerably from what many developers are used to. - -In this new paradigm, new steps are added to the inner dev loop, and more broadly, the developer begins to share responsibility for the full life cycle of their software. Inevitably this means taking new workflows and tools on board to ensure that the full life cycle continues full speed ahead. diff --git a/docs/v2.2/concepts/faster.md b/docs/v2.2/concepts/faster.md deleted file mode 100644 index 7aa74ad1..00000000 --- a/docs/v2.2/concepts/faster.md +++ /dev/null @@ -1,25 +0,0 @@ -# Making the remote local: Faster feedback, collaboration and debugging - -With the goal of achieving [fast, efficient development](/use-case/local-kubernetes-development/), developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. - -## How should I set up a Kubernetes development environment? - -[Setting up a development environment](/resources/development-environments-microservices/) for Kubernetes can be much more complex than the set up for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. - -While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. - -A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../../quick-start/) to facilitate fast feedback, debugging and collaboration. - -## What is Telepresence? - -Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../../quick-start/). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. - -## How can I get fast, efficient local development? - -The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. 
- -A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code, local debugging in production, and [preview URL](../../howtos/preview-urls/) functionality to share dev environments with others for multi-user collaboration. - -Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. - -The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts and preview URLs. diff --git a/docs/v2.2/doc-links.yml b/docs/v2.2/doc-links.yml deleted file mode 100644 index 05b54d73..00000000 --- a/docs/v2.2/doc-links.yml +++ /dev/null @@ -1,58 +0,0 @@ - - title: Quick start - link: quick-start - - title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ - - title: Core concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: 'Making the remote local: Faster feedback, collaboration and debugging' - link: concepts/faster - - title: Context propagation - link: concepts/context-prop - - title: How do I... - items: - - title: Intercept a service in your own environment - link: howtos/intercepts - - title: Share dev environments with preview URLs - link: howtos/preview-urls - - title: Proxy outbound traffic to my cluster - link: howtos/outbound - - title: Technical reference - items: - - title: Architecture - link: reference/architecture - - title: Client reference - link: reference/client - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Using Docker for intercepts - link: reference/docker-run - - title: Environment variables - link: reference/environment - - title: Intercepts - link: reference/intercepts - - title: Volume mounts - link: reference/volume - - title: DNS resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: Using Telepresence with Linkerd - link: reference/linkerd - - title: FAQs - link: faqs - - title: Troubleshooting - link: troubleshooting - - title: Community - link: community diff --git a/docs/v2.2/faqs.md b/docs/v2.2/faqs.md deleted file mode 100644 index 3517c7f7..00000000 --- a/docs/v2.2/faqs.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. 
The resource constraints and the sheer number of these services mean that it is often difficult or impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two-way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access to the application with additional developers or stakeholders via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What protocols can be intercepted by Telepresence?** - -All protocols that run over HTTP/1.1 and HTTP/2 can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](../../../../feedback) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect`, you will be able to access any service in your cluster via its namespace-qualified DNS name. - -This means you can curl endpoints directly, e.g. `curl <service-name>.<namespace>:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you run `telepresence intercept <service-name> -n <namespace>`, you will be able to resolve just the `<service-name>` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g.
when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. - -On first use, Telepresence will prompt you for this ingress information, make its best guess at the correct values, and ask you to confirm or update them. - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the Traffic Manager and Traffic Agent containers that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptables mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single Traffic Manager service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted, all pods associated with this workload will be restarted with the Traffic Agent automatically injected. - -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the Traffic Manager service installed in the cluster and the Traffic Agent containers injected into each pod being intercepted. - -Running this command will also stop the local daemon. - -** What language is Telepresence written in?** - -All of the Telepresence client and cluster-side components are written in Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established via the standard `kubectl` mechanisms and SSH tunnelling. - - - -** What identity providers are supported for authenticating to view a preview URL?** - -* GitHub -* GitLab -* Google - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](../../../../feedback) which providers are the most important to you and your team so that we can prioritize them. - -** Is Telepresence open source?** - -Telepresence will be open source soon; in the meantime it is free to download. We prioritized releasing the binary as soon as possible for community feedback, but are actively working on the open-sourcing logistics. - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](../../../../feedback), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts.
diff --git a/docs/v2.2/howtos/intercepts.md b/docs/v2.2/howtos/intercepts.md deleted file mode 100644 index 2a88f752..00000000 --- a/docs/v2.2/howtos/intercepts.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Intercept a service in your own environment - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Intercept your service](#3-intercept-your-service) -* [4. Create a preview URL to only intercept certain requests to your service](#4-create-a-preview-url-to-only-intercept-certain-requests-to-your-service) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -For a detailed walk-though on creating intercepts using our sample app, follow the quick start guide. - -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/install-kubectl/) and [set up](https://kubernetes.io/docs/tasks/tools/install-kubectl/#verifying-kubectl-configuration) to use a Kubernetes cluster, preferably an empty test cluster. - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -This guide assumes you have a Kubernetes deployment and service accessible publicly by an ingress controller and that you can run a copy of that service on your laptop. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: - `telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: - `curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Intercept your service - -In this section, we will go through the steps required for you to intercept all traffic going to a service in your cluster and route it to your local environment instead. - -1. List the services that you can intercept with `telepresence list` and make sure the one you want to intercept is listed. - - For example, this would confirm that `example-service` can be intercepted by Telepresence: - ``` - $ telepresence list - - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -2. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example, this would show that the port `80` is named `http` in the `example-service`: - - ``` - $ kubectl get service example-service --output yaml - - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -3. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - - - For the `--port` argument, specify the port on which your local instance of your service will be running. - - If the service you are intercepting exposes more than one port, specify the one you want to intercept after a colon. - - For the `--env-file` argument, specify the path to a file on which Telepresence should write the environment variables that your service is currently running with. This is going to be useful as we start our service. - - For the example below, Telepresence will intercept traffic going to service `example-service` so that requests reaching it on port `http` in the cluster get routed to `8080` on the workstation and write the environment variables of the service to `~/example-service-intercept.env`. - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -4. Start your local environment using the environment variables retrieved in the previous step. - - Here are a few options to pass the environment variables to your local process: - - with `docker run`, provide the path to the file using the [`--env-file` argument](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) - - with JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.) use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile) - - with Visual Studio Code, specify the path to the environment variables file in the `envFile` field of your configuration - -5. 
Query the environment in which you intercepted a service the way you usually would and see your local instance being invoked. - - - Didn't work? Make sure the port you're listening on matches the one specified when creating your intercept. - - - - Congratulations! All the traffic usually going to your Kubernetes Service is now being routed to your local environment! - - -You can now: -- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - -## 4. Create a preview URL to only intercept certain requests to your service - -When working on a development environment with multiple engineers, you don't want your intercepts to impact your -teammates. Ambassador Cloud automatically generates a preview URL when creating an intercept if you are logged in. By -doing so, Telepresence can route only the requests coming from that preview URL to your local environment; the rest will -be routed to your cluster as usual. - -1. Clean up your previous intercept by removing it: -`telepresence leave ` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept --port [:] --env-file ` - - You will be asked for the following information: - 1. **Ingress layer 3 address**: This would usually be the internal address of your ingress controller in the format `.namespace`. For example, if you have a service `ambassador-edge-stack` in the `ambassador` namespace, you would enter `ambassador-edge-stack.ambassador`. - 2. **Ingress port**: The port on which your ingress controller is listening (often 80 for non-TLS and 443 for TLS). - 3. **Ingress TLS encryption**: Whether the ingress controller is expecting TLS communication on the specified port. - 4. **Ingress layer 5 hostname**: If your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), this is the value you would need to enter here. - - - Telepresence supports any ingress controller, not just Ambassador Edge Stack. - - - For the example below, you will create a preview URL that will send traffic to the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and setting the `Host` HTTP header to `dev-environment.edgestack.me`: - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. 
- - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using Deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -4. Start your local service as in the previous step. - -5. Go to the preview URL printed after doing the intercept and see that your local service is processing the request. - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -6. Make a request on the URL you would usually query for that environment. The request should not be routed to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) will route to services in the cluster like normal. - - - Congratulations! You have now only intercepted traffic coming from your preview URL, without impacting your teammates. - - -You can now: -- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - -...and all of this without impacting your teammates! -## What's Next? - - diff --git a/docs/v2.2/howtos/outbound.md b/docs/v2.2/howtos/outbound.md deleted file mode 100644 index 83ec20b0..00000000 --- a/docs/v2.2/howtos/outbound.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Proxy outbound traffic to my cluster - -While preview URLs are a powerful feature, there are other options to use Telepresence for proxying traffic between your laptop and the cluster. - - We'll assume below that you have the quick start sample web app running in your cluster so that we can test accessing the verylargejavaservice service. That service can be substituted however for any service you are running. - -## Proxying outbound traffic - -Connecting to the cluster instead of running an intercept will allow you to access cluster workloads as if your laptop was another pod in the cluster. You will be able to access other Kubernetes services using `.`, for example by curling a service from your terminal. A service running on your laptop will also be able to interact with other services on the cluster by name. - -Connecting to the cluster starts the background daemon on your machine and installs the [Traffic Manager pod](../../reference/architecture/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect`, you will be prompted for your password to run the daemon. - - ``` - $ telepresence connect - Launching Telepresence Daemon v2.1.4 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground /home//.cache/telepresence/logs '' ''" - [sudo] password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -1. 
Run `telepresence status` to confirm that you are connected to your cluster and are proxying traffic to it. - - ``` - $ telepresence status - Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" - User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 0 total - ``` - -1. Now try to access your service by name with `curl verylargejavaservice.default:8080`. Telepresence will route the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl verylargejavaservice.default:8080 - - - - Welcome to the EdgyCorp WebApp - ... - ``` - -1. Terminate the client with `telepresence quit` and try to access the service again; it will fail because traffic is no longer being proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -When using Telepresence in this way, services must be accessed with the namespace-qualified DNS name (<service name>.<namespace>) before starting an intercept. After starting an intercept, only <service name> is required. Read more about these differences in DNS resolution here. - -## Controlling outbound connectivity - -By default, Telepresence will provide access to all Services found in all namespaces in the connected cluster. This might lead to problems if the user does not have access permissions to all namespaces via RBAC. The `--mapped-namespaces <namespaces>` flag was added to give the user control over exactly which namespaces will be accessible. - -When using this option, it is important to include all namespaces containing services to be accessed and also all namespaces that contain services that those intercepted services might use. - -### Using local-only intercepts - -An intercept with the flag `--local-only` can be used to control outbound connectivity to specific namespaces. - -When developing services that have not yet been deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service is intended to be deployed so that it can access other services in that namespace without using qualified names. - - ``` - $ telepresence intercept <name> --namespace <namespace> --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. The intercept is deactivated just like any other intercept. - - ``` - $ telepresence leave <name> - ``` -The unqualified name access is now removed, provided that no other active intercept is using the same namespace. - -### External dependencies (formerly `--also-proxy`) - -If you have a resource outside of the cluster that you need access to, you can leverage Headless Services to provide access. This will give you a Kubernetes service formatted like all other services (e.g. `my-service.prod.svc.cluster.local`) that resolves to your resource. - -If the outside service has a DNS name, you can use the [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) service type, which will create a service that can be used from within your cluster and from your local machine when connected with Telepresence. - -If the outside service is an IP address, create a [service without selectors](https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors) and then create an Endpoints object of the same name.
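As an illustrative sketch of both variants (the names, namespace, address, and port below are placeholders, not anything Telepresence requires), the manifests look roughly like this:

```yaml
# Variant 1: an ExternalName Service that gives an outside DNS name
# (mydb.example.com) a normal in-cluster service name.
apiVersion: v1
kind: Service
metadata:
  name: my-external-db
  namespace: prod
spec:
  type: ExternalName
  externalName: mydb.example.com
---
# Variant 2: a selector-less Service plus a matching Endpoints object
# pointing at a fixed IP outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: my-external-ip
  namespace: prod
spec:
  ports:
    - port: 5432
---
apiVersion: v1
kind: Endpoints
metadata:
  name: my-external-ip
  namespace: prod
subsets:
  - addresses:
      - ip: 203.0.113.42
    ports:
      - port: 5432
```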
- -In both scenarios, Kubernetes will create a service that can be used from within your cluster and from your local machine when connected with telepresence. diff --git a/docs/v2.2/howtos/preview-urls.md b/docs/v2.2/howtos/preview-urls.md deleted file mode 100644 index d0934e05..00000000 --- a/docs/v2.2/howtos/preview-urls.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Share dev environments with preview URLs - -Telepresence can generate sharable preview URLs, allowing you to work on a copy of your service locally and share that environment directly with a teammate for pair programming. While using preview URLs Telepresence will route only the requests coming from that preview URL to your local environment; requests to the ingress will be routed to your cluster as usual. - -Preview URLs are protected behind authentication via Ambassador Cloud, ensuring that only users in your organization can view them. A preview URL can also be set to allow public access for sharing with outside collaborators. - -## Prerequisites - -* You should have the Telepresence CLI [installed](../../install/) on your laptop. - -* If you have Telepresence already installed and have used it previously, please first reset it with `telepresence uninstall --everything`. - -* You will need a service running in your cluster that you would like to intercept. - - -Need a sample app to try with preview URLs? Check out the quick start. It has a multi-service app to install in your cluster with instructions to create a preview URL for that app. - - -## Creating a preview URL - -1. List the services that you can intercept with `telepresence list` and make sure the one you want is listed. - - If it isn't: - - * Only Deployments, ReplicaSets, or StatefulSets are supported, and each of those requires a label matching a Service - - * If the service is in a different namespace, specify it with the `--namespace` flag - -2. Login to Ambassador Cloud where you can manage and share preview URLs: -`telepresence login` - - ``` - $ telepresence login - - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept: -`telepresence intercept --port --env-file ` - - For `--port`, specify the port on which your local instance of your service will be running. If the service you are intercepting exposes more than one port, specify the one you want to intercept after a colon. - - For `--env-file`, specify a file path where Telepresence will write the environment variables that are set in the Pod. This is going to be useful as we start our service locally. - - You will be asked for the following information: - 1. **Ingress layer 3 address**: This would usually be the internal address of your ingress controller in the format `.namespace `. For example, if you have a service `ambassador-edge-stack` in the `ambassador` namespace, you would enter `ambassador-edge-stack.ambassador`. - 2. **Ingress port**: The port on which your ingress controller is listening (often 80 for non-TLS and 443 for TLS). - 3. **Ingress TLS encryption**: Whether the ingress controller is expecting TLS communication on the specified port. - 4. **Ingress layer 5 hostname**: If your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), enter that value here. 
- - For the example below, you will create a preview URL for `example-service` which listens on port 8080. The preview URL for ingress will use the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and the hostname `dev-environment.edgestack.me`: - - ``` - $ telepresence intercept example-service --port 8080 --env-file ~/ex-svc.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -4. Start your local environment using the environment variables retrieved in the previous step. - - Here are a few options to pass the environment variables to your local process: - - with `docker run`, provide the path to the file using the [`--env-file` argument](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) - - with JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.) use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile) - - with Visual Studio Code, specify the path to the environment variables file in the `envFile` field of your configuration - -5. Go to the preview URL that was provided after starting the intercept (the next to last line in the terminal output above). Your local service will be processing the request. - - - Success! You have intercepted traffic coming from your preview URL without impacting other traffic from your Ingress. - - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -6. Make a request on the URL you would usually query for that environment. The request should **not** be routed to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) will route to services in the cluster like normal. - -7. Share with a teammate. - - You can collaborate with teammates by sending your preview URL to them. They will be asked to log in to Ambassador Cloud if they are not already. Upon log in they must select the same identity provider and org as you are using; that is how they are authorized to access the preview URL (see the [list of supported identity providers](../../faqs/#idps)). When they visit the preview URL, they will see the intercepted service running on your laptop. - - - Congratulations! You have now created a dev environment and shared it with a teammate! 
While you and your partner work together to debug your service, the production version remains unchanged to the rest of your team until you commit your changes. - - -## Sharing a preview URL with people outside your team - -To collaborate with someone outside of your identity provider's organization, you must go to [Ambassador Cloud](https://app.getambassador.io/cloud/), select the preview URL, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the intercept either from the dashboard or by running `telepresence leave ` also removes all access to the preview URL. diff --git a/docs/v2.2/images/container-inner-dev-loop.png b/docs/v2.2/images/container-inner-dev-loop.png deleted file mode 100644 index 06586cd6..00000000 Binary files a/docs/v2.2/images/container-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.2/images/github-login.png b/docs/v2.2/images/github-login.png deleted file mode 100644 index cfd4d4bf..00000000 Binary files a/docs/v2.2/images/github-login.png and /dev/null differ diff --git a/docs/v2.2/images/logo.png b/docs/v2.2/images/logo.png deleted file mode 100644 index 701f63ba..00000000 Binary files a/docs/v2.2/images/logo.png and /dev/null differ diff --git a/docs/v2.2/images/trad-inner-dev-loop.png b/docs/v2.2/images/trad-inner-dev-loop.png deleted file mode 100644 index 618b674f..00000000 Binary files a/docs/v2.2/images/trad-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.2/install/index.md b/docs/v2.2/install/index.md deleted file mode 100644 index a6d423db..00000000 --- a/docs/v2.2/install/index.md +++ /dev/null @@ -1,55 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install - -Install Telepresence by running the commands below for your OS. - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## What's Next? - -Follow one of our [quick start guides](../quick-start/) to start using Telepresence, either with our sample app or in your own environment. - -## Installing older versions of Telepresence - -Use these URLs to download an older version for your OS, replacing `x.y.z` with the versions you want. 
- - - - -``` -https://app.getambassador.io/download/tel2/darwin/amd64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/x.y.z/telepresence -``` - - - diff --git a/docs/v2.2/install/migrate-from-legacy.md b/docs/v2.2/install/migrate-from-legacy.md deleted file mode 100644 index a61e17af..00000000 --- a/docs/v2.2/install/migrate-from-legacy.md +++ /dev/null @@ -1,98 +0,0 @@ -# Migrate from legacy Telepresence - -Telepresence (formerly referenced as Telepresence 2, which is the current major version) has different mechanics and requires a different mental model from [legacy Telepresence 1](https://www.telepresence.io/docs/v1/) when working with local instances of your services. - -In legacy Telepresence, a pod running a service was swapped with a pod running the Telepresence proxy. This proxy received traffic intended for the service, and sent the traffic onward to the target workstation or laptop. We called this mechanism "swap-deployment". - -In practice, this mechanism, while simple in concept, had some challenges. Losing the connection to the cluster would leave the deployment in an inconsistent state. Swapping the pods would take time. - -Telepresence introduces a [new architecture](../../reference/architecture/) built around "intercepts" that addresses these problems. With Telepresence, a sidecar proxy is injected onto the pod. The proxy then intercepts traffic intended for the pod and routes it to the workstation/laptop. The advantage of this approach is that the service is running at all times, and no swapping is used. By using the proxy approach, we can also do selective intercepts, where certain types of traffic get routed to the service while other traffic gets routed to your laptop/workstation. - -Please see [the Telepresence quick start](../../quick-start/) for an introduction to running intercepts and [the intercept reference doc](../../reference/intercepts/) for a deep dive into intercepts. - -## Using legacy Telepresence commands - -First please ensure you've [installed Telepresence](../). - -Telepresence is able to translate common legacy Telepresence commands into native Telepresence commands. -So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used -to with the Telepresence binary. - -For example, say you have a deployment (`myserver`) that you want to swap deployment (equivalent to intercept in -Telepresence) with a python server, you could run the following command: - -``` -$ telepresence --swap-deployment myserver --expose 9090 --run python3 -m http.server 9090 -< help text > - -Legacy telepresence command used -Command roughly translates to the following in Telepresence: -telepresence intercept echo-easy --port 9090 -- python3 -m http.server 9090 -running... -Connecting to traffic manager... -Connected to context -Using Deployment myserver -intercepted - Intercept name : myserver - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:9090 - Intercepting : all TCP connections -Serving HTTP on :: port 9090 (http://[::]:9090/) ... -``` - -Telepresence will let you know what the legacy Telepresence command has mapped to and automatically -runs it. So you can get started with Telepresence today, using the commands you are used to -and it will help you learn the Telepresence syntax. - -### Legacy command mapping - -Below is the mapping of legacy Telepresence to Telepresence commands (where they exist and -are supported). 
- -| Legacy Telepresence Command | Telepresence Command | -|--------------------------------------------------|--------------------------------------------| -| --swap-deployment $workload | intercept $workload | -| --expose localPort[:remotePort] | intercept --port localPort[:remotePort] | -| --swap-deployment $workload --run-shell | intercept $workload -- bash | -| --swap-deployment $workload --run $cmd | intercept $workload -- $cmd | -| --swap-deployment $workload --docker-run $cmd | intercept $workload --docker-run -- $cmd | -| --run-shell | connect -- bash | -| --run $cmd | connect -- $cmd | -| --env-file,--env-json | --env-file, --env-json (haven't changed) | -| --context,--namespace | --context, --namespace (haven't changed) | -| --mount,--docker-mount | --mount, --docker-mount (haven't changed) | - -### Legacy Telepresence command limitations - -Some of the commands and flags from legacy Telepresence either didn't apply to Telepresence or -aren't yet supported in Telepresence. For some known popular commands, such as --method, -Telepresence will include output letting you know that the flag has gone away. For flags that -Telepresence can't translate yet, it will let you know that that flag is "unsupported". - -If Telepresence is missing any flags or functionality that is integral to your usage, please let us know -by [creating an issue](https://github.com/telepresenceio/telepresence/issues) and/or talking to us on our [Slack channel](https://a8r.io/Slack)! - -## Telepresence changes - -Telepresence installs a Traffic Manager in the cluster and Traffic Agents alongside workloads when performing intercepts (including -with `--swap-deployment`) and leaves them. If you use `--swap-deployment`, the intercept will be left once the process -dies, but the agent will remain. There's no harm in leaving the agent running alongside your service, but when you -want to remove them from the cluster, the following Telepresence command will help: -``` -$ telepresence uninstall --help -Uninstall telepresence agents and manager - -Usage: - telepresence uninstall [flags] { --agent |--all-agents | --everything } - -Flags: - -d, --agent uninstall intercept agent on specific deployments - -a, --all-agents uninstall intercept agent on all deployments - -e, --everything uninstall agents and the traffic manager - -h, --help help for uninstall - -n, --namespace string If present, the namespace scope for this CLI request -``` - -Since the new architecture deploys a Traffic Manager into the Ambassador namespace, please take a look at -our [rbac guide](../../reference/rbac) if you run into any issues with permissions while upgrading to Telepresence. diff --git a/docs/v2.2/install/upgrade.md b/docs/v2.2/install/upgrade.md deleted file mode 100644 index 4a2332ab..00000000 --- a/docs/v2.2/install/upgrade.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: "How to upgrade your installation of Telepresence and install previous versions." ---- - -import Platform from '@src/components/Platform'; - -# Upgrade Process -The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. 
Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -After upgrading your CLI, the Traffic Manager **must be uninstalled** from your cluster. This can be done using `telepresence uninstall --everything` or by `kubectl delete svc,deploy -n ambassador traffic-manager`. The next time you run a `telepresence` command it will deploy an upgraded Traffic Manager. diff --git a/docs/v2.2/quick-start/TelepresenceQuickStartLanding.js b/docs/v2.2/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 537a6325..00000000 --- a/docs/v2.2/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,126 +0,0 @@ -import React from 'react'; - -import Icon from '../../../src/components/Icon'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */ -const Box = ({ children, color = 'blue', withConnector = false }) => ( - <> - {withConnector && ( -
- -
- )} -
{children}
- -); - -const TelepresenceQuickStartLanding = () => ( -
-

- Telepresence -

-

- Explore the use cases of Telepresence with a free remote Kubernetes - cluster, or dive right in using your own. -

- -
-
-
-

- Use Our Free Demo Cluster -

-

- See how Telepresence works without having to mess with your - production environments. -

-
- -

6 minutes

-

Integration Testing

-

- See how changes to a single service impact your entire application - without having to run your entire app locally. -

- - GET STARTED{' '} - - -
- -

5 minutes

-

Fast code changes

-

- Make changes to your service locally and see the results instantly, - without waiting for containers to build. -

- - GET STARTED{' '} - - -
-
-
-
-

- Use Your Cluster -

-

- Understand how Telepresence fits in to your Kubernetes development - workflow. -

-
- -

10 minutes

-

Intercept your service in your cluster

-

- Query services only exposed in your cluster's network. Make changes - and see them instantly in your K8s environment. -

- - GET STARTED{' '} - - -
-
-
- -
-

Watch the Demo

-
-
-

- See Telepresence in action in our 3-minute demo - video that you can share with your teammates. -

-
    -
  • Instant feedback loops
  • -
  • Infinite-scale development environments
  • -
  • Access to your favorite local tools
  • -
  • Easy collaborative development with teammates
  • -
-
-
- -
-
-
-
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/v2.2/quick-start/demo-node.md b/docs/v2.2/quick-start/demo-node.md deleted file mode 100644 index 9d0aef77..00000000 --- a/docs/v2.2/quick-start/demo-node.md +++ /dev/null @@ -1,288 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import QSCards from './qs-cards' - -# Telepresence Quick Start - -
-

Contents

- -* [1. Download the demo cluster archive](#1-download-the-demo-cluster-archive) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Check out the sample application](#3-check-out-the-sample-application) -* [4. Run a service on your laptop](#4-run-a-service-on-your-laptop) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -In this guide we'll give you **everything you need in a preconfigured demo cluster:** the Telepresence CLI, a config file for connecting to your demo cluster, and code to run a cluster service locally. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have a version in React if you prefer. - - - - Already have a cluster? Switch over to a version of this guide that takes you though the same steps using your own cluster. - - -## 1. Download the demo cluster archive - -1. {window.open('https://app.getambassador.io/cloud/demo-cluster-download-popup', 'ambassador-cloud-demo-cluster', 'menubar=no,location=no,resizable=yes,scrollbars=yes,status=no,width=550,height=750'); e.preventDefault(); }} target="_blank">Sign in to Ambassador Cloud to download your demo cluster archive. The archive contains all the tools and configurations you need to complete this guide. - -2. Extract the archive file, open the `ambassador-demo-cluster` folder, and run the installer script (the commands below might vary based on where your browser saves downloaded files). - - - This step will also install some dependency packages onto your laptop using npm, you can see those packages at ambassador-demo-cluster/edgey-corp-nodejs/DataProcessingService/package.json. - - - ``` - cd ~/Downloads - unzip ambassador-demo-cluster.zip -d ambassador-demo-cluster - cd ambassador-demo-cluster - ./install.sh - ``` - -3. The demo cluster we provided already has a demo app running. List the app's services: - `kubectl get services` - - ``` - $ kubectl get services - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.43.0.1 443/TCP 14h - dataprocessingservice ClusterIP 10.43.159.239 3000/TCP 14h - verylargejavaservice ClusterIP 10.43.223.61 8080/TCP 14h - verylargedatastore ClusterIP 10.43.203.19 8080/TCP 14h - ``` - -4. Confirm that the Telepresence CLI is now installed (we expect to see the daemons are not running yet): -`telepresence status` - - ``` - $ telepresence status - - Root Daemon: Not running - User Daemon: Not running - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence status command. - - - - You now have Telepresence installed on your workstation and a Kubernetes cluster configured in your terminal. - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster (this requires root privileges and will ask for your password): -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. 
Check out the sample application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - -We'll use a sample app that is already installed in your demo cluster. Let's take a quick look at its architecture before continuing. - -1. Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -2. Since you’ve already connected Telepresence to your cluster, you can access the frontend service in your browser at http://verylargejavaservice.default:8080. - -3. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Run a service on your laptop - -Now start up the DataProcessingService service on your laptop. This version of the code has the UI color set to blue instead of green. - -1. **In a new terminal window**, go to the demo application directory in the extracted archive folder: - `cd edgey-corp-nodejs/DataProcessingService` - -2. Start the application: - `npm start` - - ``` - $ npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - -3. **Back in your previous terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - - Didn't work? Make sure you are working in the terminal window where you ran the script because it sets environment variables to access the demo cluster. Those variables will only apply to that terminal session. - - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - ... - ``` - -2. Go to the frontend service again in your browser at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. 
Now visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. The frontend `verylargejavaservice` is still running on the cluster, but its request to the `DataProcessingService` to retrieve the color to show is being proxied by Telepresence to your laptop. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n`. The default for the fourth value is correct so hit enter to accept it - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: n - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.2/quick-start/demo-react.md b/docs/v2.2/quick-start/demo-react.md deleted file mode 100644 index 7471f23f..00000000 --- a/docs/v2.2/quick-start/demo-react.md +++ /dev/null @@ -1,255 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import QSCards from './qs-cards' - -# Telepresence Quick Start - React - -
-
Contents
- -* [1. Download the demo cluster archive](#1-download-the-demo-cluster-archive) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Set up the sample application](#3-set-up-the-sample-application) -* [4. Test app](#4-test-app) -* [5. Run a service on your laptop](#5-run-a-service-on-your-laptop) -* [6. Make a code change](#6-make-a-code-change) -* [7. Intercept all traffic to the service](#7-intercept-all-traffic-to-the-service) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -In this guide we'll give you **everything you need in a preconfigured demo cluster:** the Telepresence CLI, a config file for connecting to your demo cluster, and code to run a cluster service locally. - - - While Telepresence works with any language, this guide uses a sample app with a frontend written in React. We have a version with a Node.js backend if you prefer. - - - - -## 1. Download the demo cluster archive - -1. {window.open('https://app.getambassador.io/cloud/demo-cluster-download-popup', 'ambassador-cloud-demo-cluster', 'menubar=no,location=no,resizable=yes,scrollbars=yes,status=no,width=550,height=750'); e.preventDefault(); }} target="_blank">Sign in to Ambassador Cloud to download your demo cluster archive. The archive contains all the tools and configurations you need to complete this guide. - -2. Extract the archive file, open the `ambassador-demo-cluster` folder, and run the installer script (the commands below might vary based on where your browser saves downloaded files). - - - This step will also install some dependency packages onto your laptop using npm, you can see those packages at ambassador-demo-cluster/edgey-corp-nodejs/DataProcessingService/package.json. - - - ``` - cd ~/Downloads - unzip ambassador-demo-cluster.zip -d ambassador-demo-cluster - cd ambassador-demo-cluster - ./install.sh - # type y to install the npm dependencies when asked - ``` - -3. Confirm that your `kubectl` is configured to use the demo cluster by getting the status of the cluster nodes, you should see a single node named `tpdemo-prod-...`: - `kubectl get nodes` - - ``` - $ kubectl get nodes - - NAME STATUS ROLES AGE VERSION - tpdemo-prod-1234 Ready control-plane,master 5d10h v1.20.2+k3s1 - ``` - -4. Confirm that the Telepresence CLI is now installed (we expect to see the daemons are not running yet): -`telepresence status` - - ``` - $ telepresence status - - Root Daemon: Not running - User Daemon: Not running - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence status command. - - - - You now have Telepresence installed on your workstation and a Kubernetes cluster configured in your terminal! - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster (this requires **root** privileges and will ask for your password): -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Set up the sample application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. 
In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - -1. Clone the emojivoto app: -`git clone https://github.com/datawire/emojivoto.git` - -1. Deploy the app to your cluster: -`kubectl apply -k emojivoto/kustomize/deployment` - -1. Change the kubectl namespace: -`kubectl config set-context --current --namespace=emojivoto` - -1. List the Services: -`kubectl get svc` - - ``` - $ kubectl get svc - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - emoji-svc ClusterIP 10.43.162.236 8080/TCP,8801/TCP 29s - voting-svc ClusterIP 10.43.51.201 8080/TCP,8801/TCP 29s - web-app ClusterIP 10.43.242.240 80/TCP 29s - web-svc ClusterIP 10.43.182.119 8080/TCP 29s - ``` - -1. Since you’ve already connected Telepresence to your cluster, you can access the frontend service in your browser at [http://web-app.emojivoto](http://web-app.emojivoto). This is the namespace qualified DNS name in the form of `service.namespace`. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Test app - -1. Vote for some emojis and see how the [leaderboard](http://web-app.emojivoto/leaderboard) changes. - -1. There is one emoji that causes an error when you vote for it. Vote for 🍩 and the leaderboard does not actually update. Also an error is shown on the browser dev console: -`GET http://web-svc.emojivoto:8080/api/vote?choice=:doughnut: 500 (Internal Server Error)` - - - Open the dev console in Chrome or Firefox with Option + ⌘ + J (macOS) or Shift + CTRL + J (Windows/Linux).
- Open the dev console in Safari with Option + ⌘ + C. -
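You can also reproduce the failing request from your terminal instead of the browser, since Telepresence resolves the cluster DNS name for you. A quick sketch based on the request shown in the dev console; the exact 500 response headers and body depend on the backend:

```
$ curl -i "http://web-svc.emojivoto:8080/api/vote?choice=:doughnut:"

HTTP/1.1 500 Internal Server Error
...
```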
- -The error is on a backend service, so **we can add an error page to notify the user** while the bug is fixed. - -## 5. Run a service on your laptop - -Now start up the `web-app` service on your laptop. We'll then make a code change and intercept this service so that we can see the immediate results of a code change to the service. - -1. **In a new terminal window**, change into the repo directory and build the application: - - `cd /emojivoto` - `make web-app-local` - - ``` - $ make web-app-local - - ... - webpack 5.34.0 compiled successfully in 4326 ms - ✨ Done in 5.38s. - ``` - -2. Change into the service's code directory and start the server: - - `cd emojivoto-web-app` - `yarn webpack serve` - - ``` - $ yarn webpack serve - - ... - ℹ 「wds」: Project is running at http://localhost:8080/ - ... - ℹ 「wdm」: Compiled successfully. - ``` - -4. Access the application at [http://localhost:8080](http://localhost:8080) and see how voting for the 🍩 is generating the same error as the application deployed in the cluster. - - - Victory, your local React server is running a-ok! - - -## 6. Make a code change -We’ve now set up a local development environment for the app. Next we'll make and locally test a code change to the app to improve the issue with voting for 🍩. - -1. In the terminal running webpack, stop the server with `Ctrl+c`. - -1. In your preferred editor open the file `emojivoto/emojivoto-web-app/js/components/Vote.jsx` and replace the `render()` function (lines 83 to the end) with [this highlighted code snippet](https://github.com/datawire/emojivoto/blob/main/assets/Vote-fixed.jsx#L83-L149). - -1. Run webpack to fully recompile the code then start the server again: - - `yarn webpack` - `yarn webpack serve` - -1. Reload the browser tab showing [http://localhost:8080](http://localhost:8080) and vote for 🍩. Notice how you see an error instead, improving the user experience. - -## 7. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the app to the version running locally instead. - - - This command must be run in the terminal window where you ran the script because the script set environment variables to access the demo cluster. Those variables will only will apply to that terminal session. - - -1. Start the intercept with the `intercept` command, setting the workload name (a Deployment in this case), namespace, and port: -`telepresence intercept web-app --namespace emojivoto --port 8080` - - ``` - $ telepresence intercept web-app --namespace emojivoto --port 8080 - - Using deployment web-app - intercepted - Intercept name: web-app-emojivoto - State : ACTIVE - ... - ``` - -2. Go to the frontend service again in your browser at [http://web-app.emojivoto](http://web-app.emojivoto). Voting for 🍩 should now show an error message to the user. - - - The web-app Deployment is being intercepted and rerouted to the server on your laptop! - - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
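If you want to stop routing that traffic to your laptop when you're finished experimenting, remove the intercept with `telepresence leave`, passing the intercept name reported in the output above (a minimal sketch):

```
$ telepresence leave web-app-emojivoto
```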
- -## What's Next? - - diff --git a/docs/v2.2/quick-start/go.md b/docs/v2.2/quick-start/go.md deleted file mode 120000 index c884a46c..00000000 --- a/docs/v2.2/quick-start/go.md +++ /dev/null @@ -1 +0,0 @@ -qs-go.md \ No newline at end of file diff --git a/docs/v2.2/quick-start/index.md b/docs/v2.2/quick-start/index.md deleted file mode 100644 index efcb65b5..00000000 --- a/docs/v2.2/quick-start/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- - description: Telepresence Quick Start. ---- - -import TelepresenceQuickStartLanding from './TelepresenceQuickStartLanding' - - diff --git a/docs/v2.2/quick-start/qs-cards.js b/docs/v2.2/quick-start/qs-cards.js deleted file mode 100644 index 31582355..00000000 --- a/docs/v2.2/quick-start/qs-cards.js +++ /dev/null @@ -1,70 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - - Collaborating - - - - Use preview URLS to collaborate with your colleagues and others - outside of your organization. - - - - - - - - Outbound Sessions - - - - While connected to the cluster, your laptop can interact with - services as if it was another pod in the cluster. - - - - - - - - FAQs - - - - Learn more about uses cases and the technical implementation of - Telepresence. - - - - -
- ); -} diff --git a/docs/v2.2/quick-start/qs-go.md b/docs/v2.2/quick-start/qs-go.md deleted file mode 100644 index 87b5d600..00000000 --- a/docs/v2.2/quick-start/qs-go.md +++ /dev/null @@ -1,343 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-
Contents
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
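Because Fresh reloads the Go server automatically, you can confirm the change from your other terminal before (or instead of) refreshing the browser, using the same check as earlier in this guide (assuming the local server is still listening on port 3000):

```
$ curl localhost:3000/color

"orange"
```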
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.2/quick-start/qs-java.md b/docs/v2.2/quick-start/qs-java.md deleted file mode 100644 index 0b039096..00000000 --- a/docs/v2.2/quick-start/qs-java.md +++ /dev/null @@ -1,337 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-
Contents
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
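If the browser still shows the old color after restarting the Java server, it's worth confirming that your Telepresence session is still connected. `telepresence status` reports the local daemons; this is a sketch, and the real output includes more detail:

```
$ telepresence status

Root Daemon: Running
User Daemon: Running
```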
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.2/quick-start/qs-node.md b/docs/v2.2/quick-start/qs-node.md deleted file mode 100644 index 806d9d47..00000000 --- a/docs/v2.2/quick-start/qs-node.md +++ /dev/null @@ -1,331 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-
Contents
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
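Since your laptop is connected to the cluster, you can also fetch the frontend by its cluster DNS name straight from a terminal rather than the browser. A sketch; the HTML returned by the page is elided here:

```
$ curl -s http://verylargejavaservice:8080 | head

...
```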
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.2/quick-start/qs-python-fastapi.md b/docs/v2.2/quick-start/qs-python-fastapi.md deleted file mode 100644 index 24f86037..00000000 --- a/docs/v2.2/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,328 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-
Contents
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.2/quick-start/qs-python.md b/docs/v2.2/quick-start/qs-python.md deleted file mode 100644 index 4d79336e..00000000 --- a/docs/v2.2/quick-start/qs-python.md +++ /dev/null @@ -1,339 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-
-Contents
-
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites -You’ll need [`kubectl` installed](https://kubernetes.io/docs/tasks/tools/#kubectl) -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server. -Python 2.x: `pip install flask requests && python app.py` -Python 3.x: `pip3 install flask requests && python3 app.py` - - ``` - $ pip install flask requests && python app.py - - Collecting flask - ... - Welcome to the DataServiceProcessingPythonService! - ... - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Python server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
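-If you prefer to make that edit from the terminal, a one-line substitution works too (a sketch that assumes `DEFAULT_COLOR` is assigned the literal string "blue" on that line of `app.py`; on macOS, use `sed -i ''` instead of `sed -i`):
-
-```
-$ sed -i 's/DEFAULT_COLOR = "blue"/DEFAULT_COLOR = "orange"/' app.py
-$ curl localhost:3000/color
-
-"orange"
-```
-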
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Login to Ambassador Cloud, a web interface for managing and sharing preview URLs: -`telepresence login` - - This opens your browser; login with your preferred identity provider and choose your org. - - ``` - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/v2.2/quick-start/telepresence-quickstart-landing.less b/docs/v2.2/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index 1a8c3ddc..00000000 --- a/docs/v2.2/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,185 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.doc-body .telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1, - h2 { - color: @blue-dark; - font-style: normal; - font-weight: normal; - letter-spacing: 0.25px; - } - - h1 { - font-size: 33px; - line-height: 40px; - - svg { - vertical-align: text-bottom; - } - } - - h2 { - font-size: 23px; - line-height: 33px; - margin: 0 0 1rem; - - .highlight-mark { - background: transparent; - color: @blue-dark; - background: -moz-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -webkit-gradient( - linear, - left top, - left bottom, - color-stop(0%, transparent), - color-stop(60%, transparent), - color-stop(60%, fade(@blue-electric, 15%)), - color-stop(100%, fade(@blue-electric, 15%)) - ); - background: -webkit-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -o-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -ms-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: linear-gradient( - to bottom, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='transparent', endColorstr='fade(@blue-electric, 15%)',GradientType=0 ); - padding: 0 3px; - margin: 0 0.1em 0 0; - } - } - - .telepresence-choice { - background: @white; - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - - strong { - color: @blue; - } - } - - .telepresence-choice-wrapper { - border-bottom: solid 1px @grey-separator; - column-gap: 60px; - display: inline-grid; - grid-template-columns: repeat(2, 1fr); - margin: 20px 0 50px; - padding: 0 0 62px; - width: 100%; - - .telepresence-choice { - ol { - li { - font-size: 14px; - } - } - - .get-started-button { - background-color: @green; - border-radius: 5px; - color: @white; - display: inline-flex; - font-style: normal; - font-weight: 600; - font-size: 14px; - line-height: 24px; - margin: 0 0 15px 5px; - padding: 13px 20px; - align-items: center; - letter-spacing: 1.25px; - text-decoration: none; - text-transform: uppercase; - transition: background-color 200ms linear 0ms; - - svg { - fill: @white; - height: 20px; - width: 20px; - } - - &:hover { - background-color: @green-dark; - text-decoration: none; - } - } - - p { - font-style: normal; - font-weight: normal; - font-size: 16px; - line-height: 26px; - letter-spacing: 0.5px; - } - } - } - - .video-wrapper { - display: flex; - flex-direction: row; - - ul { - li { - font-size: 14px; - margin: 0 10px 10px 0; - } - } - - div { - &.video-container { - flex: 1 1 70%; - position: relative; - width: 100%; - padding-bottom: 39.375%; - - .video { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; - } - } - - 
&.description { - flex: 0 1 30%; - } - } - } -} diff --git a/docs/v2.2/redirects.yml b/docs/v2.2/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/v2.2/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/v2.2/reference/architecture.md b/docs/v2.2/reference/architecture.md deleted file mode 100644 index 47facb0b..00000000 --- a/docs/v2.2/reference/architecture.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: "How Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `kubectl describe -pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. diff --git a/docs/v2.2/reference/client.md b/docs/v2.2/reference/client.md deleted file mode 100644 index 2251876c..00000000 --- a/docs/v2.2/reference/client.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." 
---- - -# Client reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by its name) | -| `login` | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out of Ambassador Cloud | -| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `preview` | Create or remove [preview URLs](../../howtos/preview-urls) for existing intercepts: `telepresence preview create ` | -| `status` | Shows the current connectivity status | -| `quit` | Quits the local daemon, stopping all intercepts and outbound traffic to the cluster| -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run it followed by the service name to be intercepted and the port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example, the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000`. A special flag `--docker-run` can be used to run the local instance [in a docker container](../docker-run). | -| `leave` | Stops an active intercept: `telepresence leave hello` | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. diff --git a/docs/v2.2/reference/cluster-config.md b/docs/v2.2/reference/cluster-config.md deleted file mode 100644 index 125c536a..00000000 --- a/docs/v2.2/reference/cluster-config.md +++ /dev/null @@ -1,120 +0,0 @@ -# Cluster-side configuration - -For the most part, Telepresence doesn't require any special -configuration in the cluster and can be used right away in any -cluster (as long as the user has adequate [RBAC permissions](../rbac)). - -However, some advanced features do require some configuration in the -cluster. - -## TLS - -In this example, other applications in the cluster expect to speak TLS to your -intercepted application (perhaps you're using a service-mesh that does -mTLS). - -In order to use `--mechanism=http` (or any features that imply -`--mechanism=http`) you need to tell Telepresence about the TLS -certificates in use. 
- -Tell Telepresence about the certificates in use by adjusting your -[workload's](../intercepts/#supported-workloads) Pod template to set a couple of -annotations on the intercepted Pods: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ "getambassador.io/inject-terminating-tls-secret": "your-terminating-secret" # optional -+ "getambassador.io/inject-originating-tls-secret": "your-originating-secret" # optional - spec: -+ serviceAccountName: "your-account-that-has-rbac-to-read-those-secrets" - containers: -``` - -- The `getambassador.io/inject-terminating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS server - certificate to use for decrypting and responding to incoming - requests. - - When Telepresence modifies the Service and workload port - definitions to point at the Telepresence Agent sidecar's port - instead of your application's actual port, the sidecar will use this - certificate to terminate TLS. - -- The `getambassador.io/inject-originating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS - client certificate to use for communicating with your application. - - You will need to set this if your application expects incoming - requests to speak TLS (for example, your - code expects to handle mTLS itself instead of letting a service-mesh - sidecar handle mTLS for it, or the port definition that Telepresence - modified pointed at the service-mesh sidecar instead of at your - application). - - If you do set this, you should to set it to the - same client certificate Secret that you configure the Ambassador - Edge Stack to use for mTLS. - -It is only possible to refer to a Secret that is in the same Namespace -as the Pod. - -The Pod will need to have permission to `get` and `watch` each of -those Secrets. - -Telepresence understands `type: kubernetes.io/tls` Secrets and -`type: istio.io/key-and-cert` Secrets; as well as `type: Opaque` -Secrets that it detects to be formatted as one of those types. - -## Air gapped cluster - -If your cluster is air gapped (it does not have access to the -internet and therefore cannot connect to Ambassador Cloud), some additional -configuration is required to acquire a license use selective intercepts. - -### Create a license - -1. Go to [the teams setting page in Ambassador Cloud](https://auth.datawire.io/redirects/settings/teams) and -select *Licenses* for the team you want to create the license for. - -2. Generate a new license (if one doesn't already exist) by clicking *Generate New License*. - -3. You will be prompted for your Cluster ID. Ensure your -kubeconfig context is using the cluster you want to create a license for then -run this command to generate the Cluster ID: - - ``` - $ telepresence current-cluster-id - - Cluster ID: - ``` - -4. Click *Generate API Key* to finish generating the license. - -### Add license to cluster - -1. On the licenses page, download the license file associated with your cluster. - -2. Use this command to generate a Kubernetes Secret config using the license file: - - ``` - $ telepresence license -f - - apiVersion: v1 - data: - hostDomain: - license: - kind: Secret - metadata: - creationTimestamp: null - name: systema-license - namespace: ambassador - ``` - -3. Save the output as a YAML file and apply it to your -cluster with `kubectl`. 
Once applied, you will be able to use selective intercepts with the -`--preview-url=false` flag (since use of preview URLs requires a connection to Ambassador Cloud). diff --git a/docs/v2.2/reference/config.md b/docs/v2.2/reference/config.md deleted file mode 100644 index ac81202a..00000000 --- a/docs/v2.2/reference/config.md +++ /dev/null @@ -1,32 +0,0 @@ -# Laptop-side configuration - -Telepresence uses a `config.yml` file to store and change certain values. The location of this file varies based on your OS: - -* macOS: `$HOME/Library/Application Support/telepresence/config.yml` -* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` - -For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level file will take precedence. - -## Values - -The config file currently only supports values for the `timeouts` key; here is an example file: - -```yaml -timeouts: - agentInstall: 1m - intercept: 10s -``` - -Values are all durations, either as a number representing seconds or a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings can be fractional (`1.5h`) or combined (`2h45m`). - -These are the valid fields for the `timeouts` key: - -|Field|Description|Default| -|---|---|---| -|`agentInstall`|Waiting for Traffic Agent to be installed|2 minutes| -|`apply`|Waiting for a Kubernetes manifest to be applied|1 minute| -|`clusterConnect`|Waiting for cluster to be connected|20 seconds| -|`intercept`|Waiting for an intercept to become active|5 seconds| -|`proxyDial`|Waiting for an outbound connection to be established|5 seconds| -|`trafficManagerConnect`|Waiting for the Traffic Manager API to connect for port forwards|20 seconds| -|`trafficManagerAPI`|Waiting for connection to the gRPC API after `trafficManagerConnect` is successful|5 seconds| diff --git a/docs/v2.2/reference/dns.md b/docs/v2.2/reference/dns.md deleted file mode 100644 index bdae98d6..00000000 --- a/docs/v2.2/reference/dns.md +++ /dev/null @@ -1,68 +0,0 @@ -# DNS resolution - -The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces of currently active intercepts. Processes running locally on the desktop will have network access to all services in such namespaces by service-name only. - -All intercepts contribute to the DNS resolver, even those that do not use the `--namespace=` option. This is because `--namespace default` is implied, and in this context, `default` is treated just like any other namespace. - -No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `<service-name>` only. Without an active intercept, the namespace-qualified DNS name must be used (in the form `<service-name>.<namespace>`). - -See this demonstrated below, using the [quick start's](../../quick-start/) sample app services. - -No intercepts are currently running, so we'll connect to the cluster and list the services that can be intercepted. - -``` -$ telepresence connect - - Connecting to traffic manager... 
- Connected to context default (https://) - -$ telepresence list - - verylargejavaservice : ready to intercept (traffic-agent not yet installed) - dataprocessingservice: ready to intercept (traffic-agent not yet installed) - verylargedatastore : ready to intercept (traffic-agent not yet installed) - -$ curl verylargejavaservice:8080 - - curl: (6) Could not resolve host: verylargejavaservice - -``` - -This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace. - -``` -$ curl verylargejavaservice.default:8080 - - - - - Welcome to the EdgyCorp WebApp - ... -``` - -Using the namespaced qualified DNS name though does work. -Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified. - -``` -$ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - -$ curl verylargejavaservice:8080 - - - - - Welcome to the EdgyCorp WebApp - ... -``` - -Now curling that service by its short name works and will as long as the intercept is active. - -The DNS resolver will always be able to resolve services using `.` regardless of intercepts. diff --git a/docs/v2.2/reference/docker-run.md b/docs/v2.2/reference/docker-run.md deleted file mode 100644 index 2262f0a5..00000000 --- a/docs/v2.2/reference/docker-run.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -Description: "How a Telepresence intercept can run a Docker container with configured environment and volume mounts." ---- - -# Using Docker for intercepts - -If you want your intercept to go to a Docker container on your laptop, use the `--docker-run` option. It creates the intercept, runs your container in the foreground, then automatically ends the intercept when the container exits. - -`telepresence intercept --port --docker-run -- ` - -The `--` separates flags intended for `telepresence intercept` from flags intended for `docker run`. - -## Example - -Imagine you are working on a new version of a your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container. - -`telepresence intercept frontend-v1 --port 8000 --docker-run -- frontend-v2` - -## Ports - -The `--port` flag can specify an additional port when `--docker-run` is used so that the local and container port can be different. This is done using `--port :`. The container port will default to the local port when using the `--port ` syntax. - -## Flags - -Telepresence will automatically pass some relevant flags to Docker in order to connect the container with the intercept. Those flags are combined with the arguments given after `--` on the command line. 
- -- `--dns-search tel2-search` Enables single label name lookups in intercepted namespaces -- `--env-file ` Loads the intercepted environment -- `--name intercept--` Names the Docker container, this flag is omitted if explicitly given on the command line -- `-p ` The local port for the intercept and the container port -- `-v ` Volume mount specification, see CLI help for `--mount` and `--docker-mount` flags for more info diff --git a/docs/v2.2/reference/environment.md b/docs/v2.2/reference/environment.md deleted file mode 100644 index b5a799cc..00000000 --- a/docs/v2.2/reference/environment.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." ---- - -# Environment variables - -Telepresence can import environment variables from the cluster pod when running an intercept. -You can then use these variables with the code running on your laptop of the service being intercepted. - -There are three options available to do this: - -1. `telepresence intercept [service] --port [port] --env-file=FILENAME` - - This will write the environment variables to a Docker Compose `.env` file. This file can be used with `docker-compose` when starting containers locally. Please see the Docker documentation regarding the [file syntax](https://docs.docker.com/compose/env-file/) and [usage](https://docs.docker.com/compose/environment-variables/) for more information. - -2. `telepresence intercept [service] --port [port] --env-json=FILENAME` - - This will write the environment variables to a JSON file. This file can be injected into other build processes. - -3. `telepresence intercept [service] --port [port] -- [COMMAND]` - - This will run a command locally with the pod's environment variables set on your laptop. Once the command quits the intercept is stopped (as if `telepresence leave [service]` was run). This can be used in conjunction with a local server command, such as `python [FILENAME]` or `node [FILENAME]` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. - - Another use would be running a subshell, Bash for example: - - `telepresence intercept [service] --port [port] -- /bin/bash` - - This would start the intercept then launch the subshell on your laptop with all the same variables set as on the pod. diff --git a/docs/v2.2/reference/intercepts.md b/docs/v2.2/reference/intercepts.md deleted file mode 100644 index 1fa0f187..00000000 --- a/docs/v2.2/reference/intercepts.md +++ /dev/null @@ -1,127 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Intercepts - -## Intercept behavior when logged into Ambassador Cloud - -After logging into Ambassador Cloud (with `telepresence login`), Telepresence will default to `--preview-url=true`, which will use Ambassador Cloud to create a sharable preview URL for this intercept. (Creating an intercept without logging in will default to `--preview-url=false`). - -In order to do this, it will prompt you for four options. For the first, `Ingress`, Telepresence tries to intelligently determine the ingress controller deployment and namespace for you. If they are correct, you can hit `enter` to accept the defaults. Set the next two options, `TLS` and `Port`, appropriately based on your ingress service. The fourth is a hostname for the service, if required by your ingress. 
- -Also because you're logged in, Telepresence will default to `--mechanism=http --http-match=auto` (or just `--http-match=auto`; `--http-match` implies `--mechanism=http`). If you hadn't been logged in it would have defaulted to `--mechanism=tcp`. This tells it to do smart intercepts and only intercept a subset of HTTP requests, rather than just intercepting the entirety of all TCP connections. This is important for working in a shared cluster with teammates, and is important for the preview URL functionality. See `telepresence intercept --help` for information on using `--http-match` to customize which requests it intercepts. - -## Supported workloads - -Kubernetes has various [workloads](https://kubernetes.io/docs/concepts/workloads/). Currently, Telepresence supports intercepting Deployments, ReplicaSets, and StatefulSets. - While many of our examples may use Deployments, they would also work on ReplicaSets and StatefulSets. - -## Specifying a namespace for an intercept - -The namespace of the intercepted workload is specified using the `--namespace` option. When this option is used, and `--workload` is not used, then the given name is interpreted as the name of the workload and the name of the intercept will be constructed from that name and the namespace. - -``` -telepresence intercept hello --namespace myns --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept -`hello-myns`. In order to remove the intercept, you will need to run -`telepresence leave hello-myns` instead of just `telepresence leave -hello`. - -The name of the intercept will be left unchanged if the workload is specified. - -``` -telepresence intercept myhello --namespace myns --workload hello --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept `myhello`. - -## Importing environment variables - -Telepresence can import the environment variables from the pod that is being intercepted; see [this doc](../environment/) for more details. - -## Creating an intercept without a preview URL - -If you *are not* logged into Ambassador Cloud, the following command will intercept all traffic bound to the service and proxy it to your laptop. This includes traffic coming through your ingress controller, so use this option carefully to avoid disrupting production environments. - -``` -telepresence intercept --port= -``` - -If you *are* logged into Ambassador Cloud, setting the `preview-url` flag to `false` is necessary. - -``` -telepresence intercept --port= --preview-url=false -``` - -This will output a header that you can set on your request for that traffic to be intercepted: - -``` -$ telepresence intercept --port= --preview-url=false -Using Deployment -intercepted - Intercept name: - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -``` -$ telepresence status -Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" -User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 1 total - dataprocessingnodeservice: @ -``` - -Finally, run `telepresence leave ` to stop the intercept. 
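-Putting those pieces together, a typical logged-in session that skips the preview URL looks roughly like this (a sketch using a hypothetical `example-service` workload that listens on local port 8080; the flags shown are the ones described above):
-
-```
-$ telepresence intercept example-service --port 8080 --preview-url=false
-$ telepresence status   # confirm the intercept shows up as active
-$ telepresence leave example-service
-```
-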
- -## Creating an intercept when a service has multiple ports - -If you are trying to intercept a service that has multiple ports, you need to tell telepresence which service port you are trying to intercept. To specify, you can either use the name of the service port or the port number itself. To see which options might be available to you and your service, use kubectl to describe your service or look in the object's YAML. For more information on multiple ports, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services). - -``` -$ telepresence intercept --port=: -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -When intercepting a service that has multiple ports, the name of the service port that has been intercepted is also listed. - -If you want to change which port has been intercepted, you can create a new intercept the same way you did above and it will change which service port is being intercepted. - -## Creating an intercept When multiple services match your workload - -Oftentimes, there's a 1-to-1 relationship between a service and a workload, so telepresence is able to auto-detect which service it should intercept based on the workload you are trying to intercept. But if you use something like [Argo](../../../../argo/latest/), it uses two services (that use the same labels) to manage traffic between a canary and a stable service. - -Fortunately, if you know which service you want to use when intercepting a workload, you can use the --service flag. So in the aforementioned demo, if you wanted to use the `echo-stable` service when intercepting your workload, your command would look like this: -``` -$ telepresence intercept echo-rollout- --port --service echo-stable -Using ReplicaSet echo-rollout- -intercepted - Intercept name : echo-rollout- - State : ACTIVE - Workload kind : ReplicaSet - Destination : 127.0.0.1:3000 - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 - Intercepting : all TCP connections -``` diff --git a/docs/v2.2/reference/linkerd.md b/docs/v2.2/reference/linkerd.md deleted file mode 100644 index 7b184cb4..00000000 --- a/docs/v2.2/reference/linkerd.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -Description: "How to get Linkerd meshed services working with Telepresence" ---- - -# Using Telepresence with Linkerd - -## Introduction -Getting started with Telepresence on Linkerd services is as simple as adding an annotation to your Deployment: - -```yaml -spec: - template: - metadata: - annotations: - config.linkerd.io/skip-outbound-ports: "8081,8022,6000-7999" -``` - -The Traffic Agent uses port 8081 for its API, 8022 for SSHFS, and 6001 for the actual tunnel between the Traffic Manager and the local system. Telling Linkerd to skip these ports allows the Traffic Agent sidecar to fully communicate with the Traffic Manager, and therefore the rest of the Telepresence system. - -## Prerequisites -1. [Telepresence binary](../../install) -2. Linkerd control plane [installed to cluster](https://linkerd.io/2.10/tasks/install/) -3. Kubectl -4. [Working ingress controller](../../../../edge-stack/latest/howtos/linkerd2) - -## Deploy -Save and deploy the following YAML. Note the `config.linkerd.io/skip-outbound-ports` annotation in the metadata of the pod template. 
- -```yaml ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: quote -spec: - replicas: 1 - selector: - matchLabels: - app: quote - strategy: - type: RollingUpdate - template: - metadata: - annotations: - linkerd.io/inject: "enabled" - config.linkerd.io/skip-outbound-ports: "8081,8022,6001" - labels: - app: quote - spec: - containers: - - name: backend - image: docker.io/datawire/quote:0.4.1 - ports: - - name: http - containerPort: 8000 - env: - - name: PORT - value: "8000" - resources: - limits: - cpu: "0.1" - memory: 100Mi -``` - -## Connect to Telepresence -Run `telepresence connect` to connect to the cluster. Then `telepresence list` should show the `quote` deployment as `ready to intercept`: - -``` -$ telepresence list - - quote: ready to intercept (traffic-agent not yet installed) -``` - -## Run the intercept -Run `telepresence intercept quote --port 8080:80` to direct traffic from the `quote` deployment to port 8080 on your local system. Assuming you have something listening on 8080, you should now be able to see your local service whenever attempting to access the `quote` service. diff --git a/docs/v2.2/reference/rbac.md b/docs/v2.2/reference/rbac.md deleted file mode 100644 index c6bb9028..00000000 --- a/docs/v2.2/reference/rbac.md +++ /dev/null @@ -1,199 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Telepresence RBAC -The intention of this document is to provide a template for securing and limiting the permissions of Telepresence. -This documentation will not cover the full extent of permissions necessary to administrate Telepresence components in a cluster. [Telepresence administration](/products/telepresence/) requires permissions for creating Service Accounts, ClusterRoles and ClusterRoleBindings, and for creating the `traffic-manager` [deployment](../architecture/#traffic-manager) which is typically done by a full cluster administrator. - -There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described above. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [intercept](../../howtos/intercepts/), and otherwise be unable to affect Kubernetes resources. - -In addition to the above, there is also a consideration of how to manage Users and Groups in Kubernetes which is outside of the scope of the document. This document will use Service Accounts to assign Roles and Bindings. Other methods of RBAC administration and enforcement can be found on the [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) page. - -## Requirements - -- Kubernetes version 1.16+ -- Cluster admin privileges to apply RBAC - -## Editing your kubeconfig - -This guide also assumes that you are utilizing a kubeconfig file that is specified by the `KUBECONFIG` environment variable. This is a `yaml` file that contains the cluster's API endpoint information as well as the user data being supplied for authentication. The Service Account name used in the example below is called tp-user. This can be replaced by any value (i.e. John or Jane) as long as references to the Service Account are consistent throughout the `yaml`. 
After an administrator has applied the RBAC configuration, a user should create a `config.yaml` in your current directory that looks like the following:​ - -```yaml -apiVersion: v1 -kind: Config -clusters: -- name: my-cluster # Must match the cluster value in the contexts config - cluster: - ## The cluster field is highly cloud dependent. -contexts: -- name: my-context - context: - cluster: my-cluster # Must match the name field in the clusters config - user: tp-user -users: -- name: tp-user # Must match the name of the Service Account created by the cluster admin - user: - token: # See note below -``` - -The Service Account token will be obtained by the cluster administrator after they create the user's Service Account. Creating the Service Account will create an associated Secret in the same namespace with the format `-token-`. This token can be obtained by your cluster administrator by running `kubectl get secret -n ambassador -o jsonpath='{.data.token}' | base64 -d`. - -After creating `config.yaml` in your current directory, export the file's location to KUBECONFIG by running `export KUBECONFIG=$(pwd)/config.yaml`. You should then be able to switch to this context by running `kubectl config use-context my-context`. - -## Cluster-wide telepresence user access - -To allow users to make intercepts across all namespaces, but with more limited `kubectl` permissions, the following `ServiceAccount`, `ClusterRole`, and `ClusterRoleBinding` will allow full `telepresence intercept` functionality. - -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate value - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: telepresence-rolebinding -subjects: -- name: tp-user - kind: ServiceAccount - namespace: ambassador -roleRef: - apiGroup: rbac.authorization.k8s.io - name: telepresence-role - kind: ClusterRole -``` - -## Namespace only telepresence user access - -RBAC for multi-tenant scenarios where multiple dev teams are sharing a single cluster where users are constrained to a specific namespace(s). 
- -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate user name - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] ---- -kind: RoleBinding # RBAC to access ambassador namespace -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: t2-ambassador-binding - namespace: ambassador -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding # RoleBinding T2 namespace to be intecpeted -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-test-binding # Update "test" for appropriate namespace to be intercepted - namespace: test # Update "test" for appropriate namespace to be intercepted -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io -​ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-role -rules: -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-binding -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-namespace-role - apiGroup: rbac.authorization.k8s.io -``` diff --git a/docs/v2.2/reference/volume.md b/docs/v2.2/reference/volume.md deleted file mode 100644 index 2e0e8bc5..00000000 --- a/docs/v2.2/reference/volume.md +++ /dev/null @@ -1,36 +0,0 @@ -# Volume mounts - -import Alert from '@material-ui/lab/Alert'; - -Telepresence supports locally mounting of volumes that are mounted to your Pods. You can specify a command to run when starting the intercept, this could be a subshell or local server such as Python or Node. - -``` -telepresence intercept --port --mount=/tmp/ -- /bin/bash -``` - -In this case, Telepresence creates the intercept, mounts the Pod's volumes to locally to `/tmp`, and starts a Bash subshell. - -Telepresence can set a random mount point for you by using `--mount=true` instead, you can then find the mount point in the output of `telepresence list` or using the `$TELEPRESENCE_ROOT` variable. 
- -``` -$ telepresence intercept --port --mount=true -- /bin/bash -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 - Intercepting : all TCP connections - -bash-3.2$ echo $TELEPRESENCE_ROOT -/var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 -``` - ---mount=true is the default if a mount option is not specified, use --mount=false to disable mounting volumes. - -With either method, the code you run locally either from the subshell or from the intercept command will need to be prepended with the `$TELEPRESENCE_ROOT` environment variable to utilitze the mounted volumes. - -For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`. - -If using --mount=true without a command, you can use either environment variable flag to retrieve the variable. diff --git a/docs/v2.2/troubleshooting/index.md b/docs/v2.2/troubleshooting/index.md deleted file mode 100644 index 8c6374bf..00000000 --- a/docs/v2.2/troubleshooting/index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an intercept did not generate a preview URL - -Preview URLs are only generated when you are logged into Ambassador Cloud, so that you can use it to manage all your preview URLs. When not logged in, the intercept will not generate a preview URL and will proxy all traffic. Remove the intercept with `telepresence leave [deployment name]`, run `telepresence login` to login to Ambassador Cloud, then recreate the intercept. See the [intercepts how-to doc](../howtos/intercepts) for more details. - -## Error on accessing preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on accessing preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Your GitHub organization isn't listed - -Ambassador Cloud needs access granted to your GitHub organization as a third-party OAuth app. If an org isn't listed during login then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → **Settings** → **Applications** → **Authorized OAuth Apps** → **Ambassador Labs**. An org owner will have a **Grant** button, anyone not an owner will have **Request** which sends an email to the owner. If an access request has been denied in the past the user will not see the **Request** button, they will have to reach out to the owner. 
- -Once access is granted, log out of Ambassador Cloud and log back in, you should see the GitHub org listed. - -The org owner can go to the **GitHub menu** → **Your organizations** → **[org name]** → **Settings** → **Third-party access** to see if Ambassador Labs has access already or authorize a request for access (only owners will see **Settings** on the org page). Clicking the pencil icon will show the permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or requesting access on initial login - -When using GitHub as your identity provider, the first time you login to Ambassador Cloud GitHub will ask to authorize Ambassador Labs to access your orgs and certain user data. - - - -Any listed org with a green check has already granted access to Ambassador Labs (you still need to authorize to allow Ambassador Labs to read your user data and org membership). - -Any org with a red X requires access to be granted to Ambassador Labs. Owners of the org will see a **Grant** button. Anyone who is not an owner will see a **Request** button. This will send an email to the org owner requesting approval to access the org. If an access request has been denied in the past the user will not see the **Request** button, they will have to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud then back in to select the org. - diff --git a/docs/v2.2/tutorial.md b/docs/v2.2/tutorial.md deleted file mode 100644 index c4296ae5..00000000 --- a/docs/v2.2/tutorial.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for your OS to install it and login to Ambassador Cloud in your browser. Follow the prompts to login with GitHub then select your organization. You will be redirected to the dashboard; later you will manage your preview URLs here. - -### macOS - -``` -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. 
Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - - -### Linux - -``` -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. - - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip/>. Be sure you use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
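If you prefer to confirm from the terminal that everything is ready before opening the browser, a quick check along the following lines should work. This is only a sketch; it assumes the demo app was deployed to the `default` namespace and Edge Stack to the `ambassador` namespace, per the manifests applied above.

```
# List the pods and confirm they all report Running/Ready
# (assumes Edge Stack in "ambassador" and the demo app in "default"):
kubectl get pods -n ambassador
kubectl get pods

# Or block until every deployment in the current namespace is available:
kubectl wait deploy --all --for condition=available --timeout=90s
```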
- -## Developing with Telepresence - -Now that your app is all wired up you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change on the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but more complex apps requiring many more dependencies is not feasible to manage running on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services on code running locally without needing to run dependent services or redeploy code updates to your cluster on every change. It also will generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and logging into Ambassador Cloud. The Cloud dashboard is used to manage your intercepts and share them with colleagues. You must be logged in to create selective intercepts as we are going to do here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`. 
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom of the page informing you that you are viewing a preview URL with your name and org name.
- - - - - - -
  1. Switch back to the dashboard page in your browser and refresh it to see your preview URL listed. Click the box to expand the options, where you can disable authentication or remove the preview.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/v2.2/versions.yml b/docs/v2.2/versions.yml deleted file mode 100644 index 620baeb0..00000000 --- a/docs/v2.2/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.2.2" -dlVersion: "2.2.2" -docsVersion: "2.2" -branch: release/v2 -productName: "Telepresence" diff --git a/docs/v2.3/community.md b/docs/v2.3/community.md deleted file mode 100644 index 922457c9..00000000 --- a/docs/v2.3/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/v2.3/concepts/context-prop.md b/docs/v2.3/concepts/context-prop.md deleted file mode 100644 index 4ec09396..00000000 --- a/docs/v2.3/concepts/context-prop.md +++ /dev/null @@ -1,25 +0,0 @@ -# Context propagation - -**Context propagation** is the transfer of request metadata across the services and remote processes of a distributed system. Telepresence uses context propagation to intelligently route requests to the appropriate destination. - -This metadata is the context that is transferred across system services. It commonly takes the form of HTTP headers; context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -Metadata propagation refers to any service or other middleware not stripping away the headers. Propagation facilitates the movement of the injected contexts between other downstream services and processes. - - -## What is distributed tracing? 
- -Distributed tracing is a technique for troubleshooting and profiling distributed microservices applications and is a common application for context propagation. It is becoming a key component for debugging. - -In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) (or another header format, such as [B3 headers](https://github.com/openzipkin/b3-propagation)), which facilitates maintaining the headers through every service without being stripped (the propagation). - - -## What are intercepts and preview URLs? - -[Intercepts](../../reference/intercepts) and [preview URLs](../../howtos/preview-urls/) are functions of Telepresence that enable easy local development from a remote Kubernetes cluster and offer a preview environment for sharing and real-time collaboration. - -Telepresence also uses custom headers and header propagation for controllable intercepts and preview URLs instead of for tracing. The headers facilitate the smart routing of requests either to live services in the cluster or services running locally on a developer’s machine. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to [Ambassador Cloud](https://app.getambassador.io) with other information about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. diff --git a/docs/v2.3/concepts/devloop.md b/docs/v2.3/concepts/devloop.md deleted file mode 100644 index 8b1fbf35..00000000 --- a/docs/v2.3/concepts/devloop.md +++ /dev/null @@ -1,50 +0,0 @@ -# The developer experience and the inner dev loop - -## How is the developer experience changing? - -The developer experience is the workflow a developer uses to develop, test, deploy, and release software. - -Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. - -The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, [controlled (canary) release](https://www.getambassador.io/docs/argo/latest/concepts/canary/), and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a [GitOps workflow](https://www.getambassador.io/docs/argo/latest/concepts/gitops/#what-is-gitops) and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. 
- -Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. - -Engineers now must design and build distributed service-based applications _and_ also assume responsibility for the full development life cycle. The new developer experience means that developers can no longer rely on monolithic application developer best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner development loop. Now developers have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this adds development time to the equation. - -## What is the inner dev loop? - -The inner dev loop is the single developer workflow. A single developer should be able to set up and use an inner dev loop to code and test changes quickly. - -Even within the Kubernetes space, developers will find much of the inner dev loop familiar. That is, code can still be written locally at a level that a developer controls and committed to version control. - -In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code — they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. - -![traditional inner dev loop](../../images/trad-inner-dev-loop.png) - -## In search of lost time: How does containerization change the inner dev loop? - -The inner dev loop is where writing and testing code happens, and time is critical for maximum developer productivity and getting features in front of end users. The faster the feedback loop, the faster developers can refactor and test again. - -Changes to the inner dev loop process, i.e., containerization, threaten to slow this development workflow down. Coding stays the same in the new inner dev loop, but code has to be containerized. The _containerized_ inner dev loop requires a number of new steps: - -* packaging code in containers -* writing a manifest to specify how Kubernetes should run the application (e.g., YAML-based configuration information, such as how much memory should be given to a container) -* pushing the container to the registry -* deploying containers in Kubernetes - -Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. - - -![container inner dev loop](../../images/container-inner-dev-loop.png) - -## Tackling the slow inner dev loop - -A slow inner dev loop can negatively impact frontend and backend teams, delaying work on individual and team levels and slowing releases into production overall. 
- -For example: - -* Frontend developers have to wait for previews of backend changes on a shared dev/staging environment (for example, until CI/CD deploys a new version) and/or rely on mocks/stubs/virtual services when coding their application locally. These changes are only verifiable by going through the CI/CD process to build and deploy within a target environment. -* Backend developers have to wait for CI/CD to build and deploy their app to a target environment to verify that their code works correctly with cluster or cloud-based dependencies as well as to share their work to get feedback. - -New technologies and tools can facilitate cloud-native, containerized development. And in the case of a sluggish inner dev loop, developers can accelerate productivity with tools that help speed the loop up again. diff --git a/docs/v2.3/concepts/devworkflow.md b/docs/v2.3/concepts/devworkflow.md deleted file mode 100644 index fa24fc2b..00000000 --- a/docs/v2.3/concepts/devworkflow.md +++ /dev/null @@ -1,7 +0,0 @@ -# The changing development workflow - -A changing workflow is one of the main challenges for developers adopting Kubernetes. Software development itself isn’t the challenge. Developers can continue to [code using the languages and tools with which they are most productive and comfortable](https://www.getambassador.io/resources/kubernetes-local-dev-toolkit/). That’s the beauty of containerized development. - -However, the cloud-native, Kubernetes-based approach to development means adopting a new development workflow and development environment. Beyond the basics, such as figuring out how to containerize software, [how to run containers in Kubernetes](https://www.getambassador.io/docs/kubernetes/latest/concepts/appdev/), and how to deploy changes into containers, for example, Kubernetes adds complexity before it delivers efficiency. The promise of a “quicker way to develop software” applies at least within the traditional aspects of the inner dev loop, where the single developer codes, builds and tests their software. But both within the inner dev loop and once code is pushed into version control to trigger the outer dev loop, the developer experience changes considerably from what many developers are used to. - -In this new paradigm, new steps are added to the inner dev loop, and more broadly, the developer begins to share responsibility for the full life cycle of their software. Inevitably this means taking new workflows and tools on board to ensure that the full life cycle continues full speed ahead. diff --git a/docs/v2.3/concepts/faster.md b/docs/v2.3/concepts/faster.md deleted file mode 100644 index b649e415..00000000 --- a/docs/v2.3/concepts/faster.md +++ /dev/null @@ -1,25 +0,0 @@ -# Making the remote local: Faster feedback, collaboration and debugging - -With the goal of achieving [fast, efficient development](https://www.getambassador.io/use-case/local-kubernetes-development/), developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. - -## How should I set up a Kubernetes development environment? - -[Setting up a development environment](https://www.getambassador.io/resources/development-environments-microservices/) for Kubernetes can be much more complex than the set up for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. 
- -While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. - -A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../../quick-start/) to facilitate fast feedback, debugging and collaboration. - -## What is Telepresence? - -Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../../quick-start/). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. - -## How can I get fast, efficient local development? - -The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. - -A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code, local debugging in production, and [preview URL](../../howtos/preview-urls/) functionality to share dev environments with others for multi-user collaboration. - -Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. - -The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts and preview URLs. 
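To make the above concrete, the "single service local, all other remote" workflow described here reduces to a handful of CLI calls. This is only a sketch: `my-service` and port `8080` are placeholders, not values taken from this page.

```
telepresence connect                            # join the cluster network from your laptop
telepresence list                               # see which workloads can be intercepted
telepresence intercept my-service --port 8080   # reroute my-service's traffic to localhost:8080
telepresence leave my-service                   # stop the intercept when you are done
```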
diff --git a/docs/v2.3/doc-links.yml b/docs/v2.3/doc-links.yml deleted file mode 100644 index 37666455..00000000 --- a/docs/v2.3/doc-links.yml +++ /dev/null @@ -1,73 +0,0 @@ - - title: Quick start - link: quick-start - - title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Install Traffic Manager with Helm - link: install/helm/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ - - title: Core concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: 'Making the remote local: Faster feedback, collaboration and debugging' - link: concepts/faster - - title: Context propagation - link: concepts/context-prop - - title: How do I... - items: - - title: Intercept a service in your own environment - link: howtos/intercepts - - title: Share dev environments with preview URLs - link: howtos/preview-urls - - title: Proxy outbound traffic to my cluster - link: howtos/outbound - - title: Send requests to an intercepted service - link: howtos/request - - title: Technical reference - items: - - title: Architecture - link: reference/architecture - - title: Client reference - link: reference/client - items: - - title: login - link: reference/client/login - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Using Docker for intercepts - link: reference/docker-run - - title: Running Telepresence in a Docker container - link: reference/inside-container - - title: Environment variables - link: reference/environment - - title: Intercepts - link: reference/intercepts - - title: Volume mounts - link: reference/volume - - title: DNS resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: Networking through Virtual Network Interface - link: reference/tun-device - - title: Connection Routing - link: reference/routing - - title: Using Telepresence with Linkerd - link: reference/linkerd - - title: FAQs - link: faqs - - title: Troubleshooting - link: troubleshooting - - title: Community - link: community - - title: Release Notes - link: release-notes diff --git a/docs/v2.3/faqs.md b/docs/v2.3/faqs.md deleted file mode 100644 index d606fe71..00000000 --- a/docs/v2.3/faqs.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. 
- -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access with additional developers or stakeholders to the application via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What operating systems does Telepresence work on?** - -Telepresence currently works natively on macOS and Linux. We are working on a native Windows port, but in the meantime, Windows users can use Telepresence with WSL 2. - -** What protocols can be intercepted by Telepresence?** - -All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](https://www.getambassador.io/feedback/) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. - -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. 
- -Telepresence will discover/prompt during first use for this info and make its best guess at figuring this out and ask you to confirm or update this. - -** Why are my intercepts still reporting as active when they've been disconnected?** - - In certain cases, Telepresence might not have been able to communicate back with Ambassador Cloud to update the intercept's status. Worry not, they will get garbage collected after a period of time. - -** Why is my intercept associated with an "Unreported" cluster?** - - Intercepts tagged with "Unreported" clusters simply mean Ambassador Cloud was unable to associate a service instance with a known detailed service from an Edge Stack or API Gateway cluster. [Connecting your cluster to the Service Catalog](/docs/telepresence/latest/quick-start/) will properly match your services from multiple data sources. - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the traffic-manager and traffic-agent images that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptable mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single `traffic-manager` service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted all pods associated with this workload will be restarted with the Traffic Agent automatically injected. - -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the `traffic-manager` service installed in the cluster and `traffic-agent` containers injected into each pod being intercepted. - -Running this command will also stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established by using -the `kubectl port-forward` machinery (though without actually spawning -a separate program) to establish a TCP connection to Telepresence -Traffic Manager in the cluster, and running Telepresence's custom VPN -protocol over that TCP connection. - - - -** What identity providers are supported for authenticating to view a preview URL?** - -* GitHub -* GitLab -* Google - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](https://www.getambassador.io/feedback/) which providers are the most important to you and your team in order for us to prioritize those. 
- -** Is Telepresence open source?** - -Telepresence will be open source soon, in the meantime it is free to download. We prioritized releasing the binary as soon as possible for community feedback, but are actively working on the open sourcing logistics. - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](https://www.getambassador.io/feedback/), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts. diff --git a/docs/v2.3/howtos/intercepts.md b/docs/v2.3/howtos/intercepts.md deleted file mode 100644 index e2536d1b..00000000 --- a/docs/v2.3/howtos/intercepts.md +++ /dev/null @@ -1,298 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Intercept a service in your own environment - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Intercept your service](#3-intercept-your-service) -* [4. Create a preview URL to only intercept certain requests to your service](#4-create-a-preview-url-to-only-intercept-certain-requests-to-your-service) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -For a detailed walk-though on creating intercepts using our sample app, follow the quick start guide. - -## Prerequisites -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -This guide assumes you have a Kubernetes deployment and service accessible publicly by an ingress controller and that you can run a copy of that service on your laptop. - -## 1. Install the Telepresence CLI - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: - `telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: - `curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Intercept your service - -In this section, we will go through the steps required for you to intercept all traffic going to a service in your cluster and route it to your local environment instead. - -1. List the services that you can intercept with `telepresence list` and make sure the one you want to intercept is listed. - - For example, this would confirm that `example-service` can be intercepted by Telepresence: - ``` - $ telepresence list - - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -2. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example, this would show that the port `80` is named `http` in the `example-service`: - - ``` - $ kubectl get service example-service --output yaml - - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -3. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - - - For the `--port` argument, specify the port on which your local instance of your service will be running. - - If the service you are intercepting exposes more than one port, specify the one you want to intercept after a colon. - - For the `--env-file` argument, specify the path to a file on which Telepresence should write the environment variables that your service is currently running with. This is going to be useful as we start our service. - - For the example below, Telepresence will intercept traffic going to service `example-service` so that requests reaching it on port `http` in the cluster get routed to `8080` on the workstation and write the environment variables of the service to `~/example-service-intercept.env`. - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -4. Start your local environment using the environment variables retrieved in the previous step. - - Here are a few options to pass the environment variables to your local process: - - with `docker run`, provide the path to the file using the [`--env-file` argument](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) - - with JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.) use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile) - - with Visual Studio Code, specify the path to the environment variables file in the `envFile` field of your configuration - -5. 
Query the environment in which you intercepted a service the way you usually would and see your local instance being invoked. - - - Didn't work? Make sure the port you're listening on matches the one specified when creating your intercept. - - - - Congratulations! All the traffic usually going to your Kubernetes Service is now being routed to your local environment! - - -You can now: -- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - -## 4. Create a preview URL to only intercept certain requests to your service - -When working on a development environment with multiple engineers, you -don't want your intercepts to impact your teammates. If you are -[logged in](../../reference/client/login/), then when creating an -intercept, by default Telpresence will automatically talk to -Ambassador Cloud to generate a preview URL. By doing so, Telepresence -can route only the requests coming from that preview URL to your local -environment; the rest will be routed to your cluster as usual. - -1. Clean up your previous intercept by removing it: -`telepresence leave ` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept --port [:] --env-file ` - - You will be asked for the following information: - 1. **Ingress layer 3 address**: This would usually be the internal address of your ingress controller in the format `.namespace`. For example, if you have a service `ambassador-edge-stack` in the `ambassador` namespace, you would enter `ambassador-edge-stack.ambassador`. - 2. **Ingress port**: The port on which your ingress controller is listening (often 80 for non-TLS and 443 for TLS). - 3. **Ingress TLS encryption**: Whether the ingress controller is expecting TLS communication on the specified port. - 4. **Ingress layer 5 hostname**: If your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), this is the value you would need to enter here. - - - Telepresence supports any ingress controller, not just Ambassador Edge Stack. - - - For the example below, you will create a preview URL that will send traffic to the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and setting the `Host` HTTP header to `dev-environment.edgestack.me`: - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? 
- - [default: n]: y - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using Deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -4. Start your local service as in the previous step. - -5. Go to the preview URL printed after doing the intercept and see that your local service is processing the request. - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -6. Make a request on the URL you would usually query for that environment. The request should not be routed to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) will route to services in the cluster like normal. - - - Congratulations! You have now only intercepted traffic coming from your preview URL, without impacting your teammates. - - -You can now: -- Make changes on the fly and see them reflected when interacting with your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - -...and all of this without impacting your teammates! -## What's Next? - - diff --git a/docs/v2.3/howtos/outbound.md b/docs/v2.3/howtos/outbound.md deleted file mode 100644 index 31830032..00000000 --- a/docs/v2.3/howtos/outbound.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Proxy outbound traffic to my cluster - -While preview URLs are a powerful feature, there are other options to use Telepresence for proxying traffic between your laptop and the cluster. - - We'll assume below that you have the quick start sample web app running in your cluster so that we can test accessing the web-app service. That service can be substituted however for any service you are running. - -## Proxying outbound traffic - -Connecting to the cluster instead of running an intercept will allow you to access cluster workloads as if your laptop was another pod in the cluster. You will be able to access other Kubernetes services using `.`, for example by curling a service from your terminal. A service running on your laptop will also be able to interact with other services on the cluster by name. - -Connecting to the cluster starts the background daemon on your machine and installs the [Traffic Manager pod](../../reference/architecture/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect`, you will be prompted for your password to run the daemon. 
- - ``` - $ telepresence connect - Launching Telepresence Daemon v2.3.7 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground /home//.cache/telepresence/logs '' ''" - [sudo] password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -1. Run `telepresence status` to confirm that you are connected to your cluster and are proxying traffic to it. - - ``` - $ telepresence status - Root Daemon: Running - Version : v2.3.7 (api 3) - Primary DNS : "" - Fallback DNS: "" - User Daemon: Running - Version : v2.3.7 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 0 total - ``` - -1. Now try to access your service by name with `curl web-app.emojivoto:80`. Telepresence will route the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl web-app.emojivoto:80 - - - - - Emoji Vote - ... - ``` - -3. Terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer being proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -When using Telepresence in this way, services must be accessed with the namespace qualified DNS name (<service name>.<namespace>) before starting an intercept. After starting an intercept, only <service name> is required. Read more about these differences in DNS resolution here. - -## Controlling outbound connectivity - -By default, Telepresence will provide access to all Services found in all namespaces in the connected cluster. This might lead to problems if the user does not have access permissions to all namespaces via RBAC. The `--mapped-namespaces ` flag was added to give the user control over exactly which namespaces will be accessible. - -When using this option, it is important to include all namespaces containing services to be accessed and also all namespaces that contain services that those intercepted services might use. - -### Using local-only intercepts - -An intercept with the flag`--local-only` can be used to control outbound connectivity to specific namespaces. - -When developing services that have not yet been deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service is intended to be deployed so that it can access other services in that namespace without using qualified names. Worth noting though, is that a local-only intercept will not cause outbound connections to originate from the intercepted namespace. Only a real intercept can do that. The reason for this is that in order to establish correct origin, the connection must be routed to a `traffic-agent`of an intercepted pod. For local-only intercepts, the outbound connections will originate from the `traffic-manager`. - - ``` - $ telepresence intercept --namespace --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. The intercept is deactivated just like any other intercept. - - ``` - $ telepresence leave - ``` -The unqualified name access is now removed provided that no other intercept is active and using the same namespace. - -### External dependencies (formerly `--also-proxy`) - -If you have a resource outside of the cluster that you need access to, you can leverage Headless Services to provide access. 
This will give you a kubernetes service formatted like all other services (`my-service.prod.svc.cluster.local`), that resolves to your resource. - -If the outside service has a DNS name, you can use the [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) service type, which will create a service that can be used from within your cluster and from your local machine when connected with telepresence. - -If the outside service is an ip, create a [service without selectors](https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors) and then create an endpoint of the same name. - -In both scenarios, Kubernetes will create a service that can be used from within your cluster and from your local machine when connected with telepresence. diff --git a/docs/v2.3/howtos/preview-urls.md b/docs/v2.3/howtos/preview-urls.md deleted file mode 100644 index 4415a544..00000000 --- a/docs/v2.3/howtos/preview-urls.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Share dev environments with preview URLs - -Telepresence can generate sharable preview URLs, allowing you to work on a copy of your service locally and share that environment directly with a teammate for pair programming. While using preview URLs Telepresence will route only the requests coming from that preview URL to your local environment; requests to the ingress will be routed to your cluster as usual. - -Preview URLs are protected behind authentication via Ambassador Cloud, ensuring that only users in your organization can view them. A preview URL can also be set to allow public access for sharing with outside collaborators. - -## Prerequisites - -* You should have the Telepresence CLI [installed](../../install/) on your laptop. - -* If you have Telepresence already installed and have used it previously, please first reset it with `telepresence uninstall --everything`. - -* You will need a service running in your cluster that you would like to intercept. - - -Need a sample app to try with preview URLs? Check out the quick start. It has a multi-service app to install in your cluster with instructions to create a preview URL for that app. - - -## Creating a preview URL - -1. List the services that you can intercept with `telepresence list` and make sure the one you want is listed. - - If it isn't: - - * Only Deployments, ReplicaSets, or StatefulSets are supported, and each of those requires a label matching a Service - - * If the service is in a different namespace, specify it with the `--namespace` flag - -2. Log in to Ambassador Cloud where you can manage and share preview - URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept: -`telepresence intercept --port --env-file ` - - For `--port`, specify the port on which your local instance of your service will be running. If the service you are intercepting exposes more than one port, specify the one you want to intercept after a colon. - - For `--env-file`, specify a file path where Telepresence will write the environment variables that are set in the Pod. 
This is going to be useful as we start our service locally. - - You will be asked for the following information: - 1. **Ingress layer 3 address**: This would usually be the internal address of your ingress controller in the format `.namespace `. For example, if you have a service `ambassador-edge-stack` in the `ambassador` namespace, you would enter `ambassador-edge-stack.ambassador`. - 2. **Ingress port**: The port on which your ingress controller is listening (often 80 for non-TLS and 443 for TLS). - 3. **Ingress TLS encryption**: Whether the ingress controller is expecting TLS communication on the specified port. - 4. **Ingress layer 5 hostname**: If your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), enter that value here. - - For the example below, you will create a preview URL for `example-service` which listens on port 8080. The preview URL for ingress will use the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and the hostname `dev-environment.edgestack.me`: - - ``` - $ telepresence intercept example-service --port 8080 --env-file ~/ex-svc.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: -]: ambassador.ambassador - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - ``` - -4. Start your local environment using the environment variables retrieved in the previous step. - - Here are a few options to pass the environment variables to your local process: - - with `docker run`, provide the path to the file using the [`--env-file` argument](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file) - - with JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.) use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile) - - with Visual Studio Code, specify the path to the environment variables file in the `envFile` field of your configuration - -5. Go to the preview URL that was provided after starting the intercept (the next to last line in the terminal output above). Your local service will be processing the request. - - - Success! You have intercepted traffic coming from your preview URL without impacting other traffic from your Ingress. - - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -6. Make a request on the URL you would usually query for that environment. 
The request should **not** be routed to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) will route to services in the cluster like normal. - -7. Share with a teammate. - - You can collaborate with teammates by sending your preview URL to - them. They will be asked to log in to Ambassador Cloud if they are - not already. Upon login they must select the same identity - provider and org as you are using; that is how they are authorized - to access the preview URL (see the [list of supported identity - providers](../../faqs/#idps)). When they visit the preview URL, - they will see the intercepted service running on your laptop. - - - Congratulations! You have now created a dev environment and shared it with a teammate! While you and your partner work together to debug your service, the production version remains unchanged to the rest of your team until you commit your changes. - - -## Sharing a preview URL with people outside your team - -To collaborate with someone outside of your identity provider's organization, you must go to [Ambassador Cloud](https://app.getambassador.io/cloud/), navigate to your service's intercepts, select the preview URL details, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the preview URL either from the dashboard or by running `telepresence preview remove ` also removes all access to the preview URL. - -## Change access restrictions - -To collaborate with someone outside of your identity provider's organization, you must make your preview URL publicly accessible. - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) -2. Navigate to the desired service Intercepts page -3. Expand the preview URL details -4. Click **Make Publicly Accessible** - -Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on a local environment. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. - -## Remove a preview URL from an Intercept - -To delete a preview URL and remove all access to the intercepted service, - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) -2. Navigate to the desired service Intercepts page -3. Expand the preview URL details -4. Click **Remove Preview** - -Alternatively, a preview URL can also be removed by running -`telepresence preview remove ` diff --git a/docs/v2.3/howtos/request.md b/docs/v2.3/howtos/request.md deleted file mode 100644 index 56d598fa..00000000 --- a/docs/v2.3/howtos/request.md +++ /dev/null @@ -1,12 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Send requests to an intercepted service - -Ambassador Cloud can inform you about the required request parameters to reach an intercepted service. - - 1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) - 2. Navigate to the desired service Intercepts page - 3. Click the **Query** button to open the pop-up menu. - 4. Toggle between **CURL**, **Headers** and **Browse**. - -The pre-built queries and header information should help you get started to query the desired intercepted service and manage header propagation. 
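
As a rough sketch of the kind of request the **CURL** option generates, a call that targets a personal intercept normally carries the intercept header mentioned elsewhere in these docs. Every value below other than the header name is a placeholder, so substitute the hostname, port, path, and intercept ID that Ambassador Cloud displays for your intercept:

```shell
# Hypothetical example only; copy the real values from the Query pop-up in Ambassador Cloud.
curl -H "x-telepresence-intercept-id: <intercept-id>:<service-name>" \
  http://<ingress-host>:<ingress-port>/<path>
```

Keep in mind that any services sitting between your ingress and the intercepted service must propagate the `x-telepresence-intercept-id` header for such a request to reach your local copy.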
diff --git a/docs/v2.3/images/container-inner-dev-loop.png b/docs/v2.3/images/container-inner-dev-loop.png deleted file mode 100644 index 06586cd6..00000000 Binary files a/docs/v2.3/images/container-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.3/images/github-login.png b/docs/v2.3/images/github-login.png deleted file mode 100644 index cfd4d4bf..00000000 Binary files a/docs/v2.3/images/github-login.png and /dev/null differ diff --git a/docs/v2.3/images/logo.png b/docs/v2.3/images/logo.png deleted file mode 100644 index 701f63ba..00000000 Binary files a/docs/v2.3/images/logo.png and /dev/null differ diff --git a/docs/v2.3/images/trad-inner-dev-loop.png b/docs/v2.3/images/trad-inner-dev-loop.png deleted file mode 100644 index 618b674f..00000000 Binary files a/docs/v2.3/images/trad-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.3/install/index.md b/docs/v2.3/install/index.md deleted file mode 100644 index aefbab39..00000000 --- a/docs/v2.3/install/index.md +++ /dev/null @@ -1,91 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install - -Install Telepresence by running the commands below for your OS. - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## What's Next? - -Follow one of our [quick start guides](../quick-start/) to start using Telepresence, either with our sample app or in your own environment. - -## Installing nightly versions of Telepresence - -We build and publish the contents of the default branch, [release/v2](https://github.com/telepresenceio/telepresence), of Telepresence -nightly, Monday through Friday. - -The tags are formatted like so: `vX.Y.Z-nightly-$gitShortHash`. - -`vX.Y.Z` is the most recent release of Telepresence with the patch version (Z) bumped one higher. -For example, if our last release was 2.3.4, nightly builds would start with v2.3.5, until a new -version of Telepresence is released. - -`$gitShortHash` will be the short hash of the git commit of the build. - -Use these URLs to download the most recent nightly build. - - - - -``` -https://app.getambassador.io/download/tel2/darwin/amd64/nightly/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/nightly/telepresence -``` - - - - -## Installing older versions of Telepresence - -Use these URLs to download an older version for your OS (including older nightly builds), replacing `x.y.z` with the versions you want. 
- - - - -``` -https://app.getambassador.io/download/tel2/darwin/amd64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/x.y.z/telepresence -``` - - - diff --git a/docs/v2.3/install/migrate-from-legacy.md b/docs/v2.3/install/migrate-from-legacy.md deleted file mode 100644 index a61e17af..00000000 --- a/docs/v2.3/install/migrate-from-legacy.md +++ /dev/null @@ -1,98 +0,0 @@ -# Migrate from legacy Telepresence - -Telepresence (formerly referenced as Telepresence 2, which is the current major version) has different mechanics and requires a different mental model from [legacy Telepresence 1](https://www.telepresence.io/docs/v1/) when working with local instances of your services. - -In legacy Telepresence, a pod running a service was swapped with a pod running the Telepresence proxy. This proxy received traffic intended for the service, and sent the traffic onward to the target workstation or laptop. We called this mechanism "swap-deployment". - -In practice, this mechanism, while simple in concept, had some challenges. Losing the connection to the cluster would leave the deployment in an inconsistent state. Swapping the pods would take time. - -Telepresence introduces a [new architecture](../../reference/architecture/) built around "intercepts" that addresses these problems. With Telepresence, a sidecar proxy is injected onto the pod. The proxy then intercepts traffic intended for the pod and routes it to the workstation/laptop. The advantage of this approach is that the service is running at all times, and no swapping is used. By using the proxy approach, we can also do selective intercepts, where certain types of traffic get routed to the service while other traffic gets routed to your laptop/workstation. - -Please see [the Telepresence quick start](../../quick-start/) for an introduction to running intercepts and [the intercept reference doc](../../reference/intercepts/) for a deep dive into intercepts. - -## Using legacy Telepresence commands - -First please ensure you've [installed Telepresence](../). - -Telepresence is able to translate common legacy Telepresence commands into native Telepresence commands. -So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used -to with the Telepresence binary. - -For example, say you have a deployment (`myserver`) that you want to swap deployment (equivalent to intercept in -Telepresence) with a python server, you could run the following command: - -``` -$ telepresence --swap-deployment myserver --expose 9090 --run python3 -m http.server 9090 -< help text > - -Legacy telepresence command used -Command roughly translates to the following in Telepresence: -telepresence intercept echo-easy --port 9090 -- python3 -m http.server 9090 -running... -Connecting to traffic manager... -Connected to context -Using Deployment myserver -intercepted - Intercept name : myserver - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:9090 - Intercepting : all TCP connections -Serving HTTP on :: port 9090 (http://[::]:9090/) ... -``` - -Telepresence will let you know what the legacy Telepresence command has mapped to and automatically -runs it. So you can get started with Telepresence today, using the commands you are used to -and it will help you learn the Telepresence syntax. - -### Legacy command mapping - -Below is the mapping of legacy Telepresence to Telepresence commands (where they exist and -are supported). 
- -| Legacy Telepresence Command | Telepresence Command | -|--------------------------------------------------|--------------------------------------------| -| --swap-deployment $workload | intercept $workload | -| --expose localPort[:remotePort] | intercept --port localPort[:remotePort] | -| --swap-deployment $workload --run-shell | intercept $workload -- bash | -| --swap-deployment $workload --run $cmd | intercept $workload -- $cmd | -| --swap-deployment $workload --docker-run $cmd | intercept $workload --docker-run -- $cmd | -| --run-shell | connect -- bash | -| --run $cmd | connect -- $cmd | -| --env-file,--env-json | --env-file, --env-json (haven't changed) | -| --context,--namespace | --context, --namespace (haven't changed) | -| --mount,--docker-mount | --mount, --docker-mount (haven't changed) | - -### Legacy Telepresence command limitations - -Some of the commands and flags from legacy Telepresence either didn't apply to Telepresence or -aren't yet supported in Telepresence. For some known popular commands, such as --method, -Telepresence will include output letting you know that the flag has gone away. For flags that -Telepresence can't translate yet, it will let you know that that flag is "unsupported". - -If Telepresence is missing any flags or functionality that is integral to your usage, please let us know -by [creating an issue](https://github.com/telepresenceio/telepresence/issues) and/or talking to us on our [Slack channel](https://a8r.io/Slack)! - -## Telepresence changes - -Telepresence installs a Traffic Manager in the cluster and Traffic Agents alongside workloads when performing intercepts (including -with `--swap-deployment`) and leaves them. If you use `--swap-deployment`, the intercept will be left once the process -dies, but the agent will remain. There's no harm in leaving the agent running alongside your service, but when you -want to remove them from the cluster, the following Telepresence command will help: -``` -$ telepresence uninstall --help -Uninstall telepresence agents and manager - -Usage: - telepresence uninstall [flags] { --agent |--all-agents | --everything } - -Flags: - -d, --agent uninstall intercept agent on specific deployments - -a, --all-agents uninstall intercept agent on all deployments - -e, --everything uninstall agents and the traffic manager - -h, --help help for uninstall - -n, --namespace string If present, the namespace scope for this CLI request -``` - -Since the new architecture deploys a Traffic Manager into the Ambassador namespace, please take a look at -our [rbac guide](../../reference/rbac) if you run into any issues with permissions while upgrading to Telepresence. diff --git a/docs/v2.3/install/upgrade.md b/docs/v2.3/install/upgrade.md deleted file mode 100644 index 7a9c3d60..00000000 --- a/docs/v2.3/install/upgrade.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -description: "How to upgrade your installation of Telepresence and install previous versions." ---- - -import Platform from '@src/components/Platform'; - -# Upgrade Process -The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. - - - - -```shell -# Upgrade via brew: -brew upgrade datawire/blackbird/telepresence - -# OR upgrade manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. 
Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -After upgrading your CLI, the Traffic Manager **must be uninstalled** from your cluster. This can be done using `telepresence uninstall --everything` or by `kubectl delete svc,deploy -n ambassador traffic-manager`. The next time you run a `telepresence` command it will deploy an upgraded Traffic Manager. diff --git a/docs/v2.3/quick-start/TelepresenceQuickStartLanding.js b/docs/v2.3/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index f9d9e572..00000000 --- a/docs/v2.3/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,126 +0,0 @@ -import React from 'react'; - -import Icon from '../../../src/components/Icon'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */ -const Box = ({ children, color = 'blue', withConnector = false }) => ( - <> - {withConnector && ( -
- -
- )} -
{children}
- -); - -const TelepresenceQuickStartLanding = () => ( -
-

- Telepresence -

-

- Explore the use cases of Telepresence with a free remote Kubernetes - cluster, or dive right in using your own. -

- -
-
-
-

- Use Our Free Demo Cluster -

-

- See how Telepresence works without having to mess with your - production environments. -

-
- -

6 minutes

-

Integration Testing

-

- See how changes to a single service impact your entire application - without having to run your entire app locally. -

- - GET STARTED{' '} - - -
- -

5 minutes

-

Fast code changes

-

- Make changes to your service locally and see the results instantly, - without waiting for containers to build. -

- - GET STARTED{' '} - - -
-
-
-
-

- Use Your Cluster -

-

- Understand how Telepresence fits into your Kubernetes development - workflow. -

-
- -

10 minutes

-

Intercept your service in your cluster

-

- Query services only exposed in your cluster's network. Make changes - and see them instantly in your K8s environment. -

- - GET STARTED{' '} - - -
-
-
- -
-

Watch the Demo

-
-
-

- See Telepresence in action in our 3-minute demo - video that you can share with your teammates. -

-
    -
  • Instant feedback loops
  • Infinite-scale development environments
  • Access to your favorite local tools
  • Easy collaborative development with teammates
-
-
- -
-
-
-
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/v2.3/quick-start/demo-node.md b/docs/v2.3/quick-start/demo-node.md deleted file mode 100644 index a847029a..00000000 --- a/docs/v2.3/quick-start/demo-node.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.3/quick-start/demo-node/) diff --git a/docs/v2.3/quick-start/demo-react.md b/docs/v2.3/quick-start/demo-react.md deleted file mode 100644 index da7929ff..00000000 --- a/docs/v2.3/quick-start/demo-react.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.3/quick-start/demo-react/) diff --git a/docs/v2.3/quick-start/go.md b/docs/v2.3/quick-start/go.md deleted file mode 120000 index c884a46c..00000000 --- a/docs/v2.3/quick-start/go.md +++ /dev/null @@ -1 +0,0 @@ -qs-go.md \ No newline at end of file diff --git a/docs/v2.3/quick-start/index.md b/docs/v2.3/quick-start/index.md deleted file mode 100644 index efcb65b5..00000000 --- a/docs/v2.3/quick-start/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- - description: Telepresence Quick Start. ---- - -import TelepresenceQuickStartLanding from './TelepresenceQuickStartLanding' - - diff --git a/docs/v2.3/quick-start/qs-cards.js b/docs/v2.3/quick-start/qs-cards.js deleted file mode 100644 index 31582355..00000000 --- a/docs/v2.3/quick-start/qs-cards.js +++ /dev/null @@ -1,70 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - - Collaborating - - - - Use preview URLs to collaborate with your colleagues and others - outside of your organization. - - - - - - - - Outbound Sessions - - - - While connected to the cluster, your laptop can interact with - services as if it were another pod in the cluster. - - - - - - - - FAQs - - - - Learn more about use cases and the technical implementation of - Telepresence. - - - - -
- ); -} diff --git a/docs/v2.3/quick-start/qs-go.md b/docs/v2.3/quick-start/qs-go.md deleted file mode 100644 index a04ba23a..00000000 --- a/docs/v2.3/quick-start/qs-go.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
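
As a quick sanity check, you can hit the same endpoint you used in step 4 of the local setup; this is only a sketch and assumes Fresh has finished reloading the server after you saved `main.go`:

```shell
# The local Go server should now return the new value:
curl localhost:3000/color
# "orange"
```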
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.3/quick-start/qs-java.md b/docs/v2.3/quick-start/qs-java.md deleted file mode 100644 index 0478503c..00000000 --- a/docs/v2.3/quick-start/qs-java.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
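
If you prefer to confirm the change from the terminal, a minimal sketch (assuming you stopped and restarted the Maven server as described in step 1) looks like this:

```shell
# Restart the local Spring Boot server so it picks up the new property value...
mvn spring-boot:run

# ...then, from another terminal, re-check the color endpoint used earlier:
curl localhost:3000/color
# "orange"
```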
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.3/quick-start/qs-node.md b/docs/v2.3/quick-start/qs-node.md deleted file mode 100644 index cbc80a64..00000000 --- a/docs/v2.3/quick-start/qs-node.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
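
The same edit can also be made and checked from the terminal; the one-liner below is only a sketch (GNU `sed` syntax, run from the `DataProcessingService` directory, and it assumes line 6 of `app.js` holds the color string as described in step 1):

```shell
# Hypothetical equivalent of the manual edit above (on macOS use `sed -i '' ...`):
sed -i '6s/blue/orange/' app.js

# The Node server auto-reloads, so the endpoint should now return the new value:
curl localhost:3000/color
# "orange"
```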
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.3/quick-start/qs-python-fastapi.md b/docs/v2.3/quick-start/qs-python-fastapi.md deleted file mode 100644 index 9326bdf8..00000000 --- a/docs/v2.3/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
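If you want to double-check the change from the terminal as well, curl the local service again; after the auto reload it should report the new color (a quick sanity check, assuming the server you started earlier is still running):

```shell
# The local FastAPI server has hot-reloaded, so the endpoint should now return the new value.
curl localhost:3000/color
# "orange"
```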
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.3/quick-start/qs-python.md b/docs/v2.3/quick-start/qs-python.md deleted file mode 100644 index 86fcead2..00000000 --- a/docs/v2.3/quick-start/qs-python.md +++ /dev/null @@ -1,349 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-
Contents
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server. -Python 2.x: `pip install flask requests && python app.py` -Python 3.x: `pip3 install flask requests && python3 app.py` - - ``` - $ pip install flask requests && python app.py - - Collecting flask - ... - Welcome to the DataServiceProcessingPythonService! - ... - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Python server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
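Before moving on to preview URLs, it can help to confirm that the intercept from step 5 is still active. A quick check (output omitted here) is:

```shell
# List the currently active intercepts; dataprocessingservice should still be listed.
telepresence list
```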
- -## 7. Create a Preview URL -Create preview URLs to do selective intercepts, meaning only traffic coming from the preview URL will be intercepted, so you can easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/v2.3/quick-start/telepresence-quickstart-landing.less b/docs/v2.3/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index 1a8c3ddc..00000000 --- a/docs/v2.3/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,185 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.doc-body .telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1, - h2 { - color: @blue-dark; - font-style: normal; - font-weight: normal; - letter-spacing: 0.25px; - } - - h1 { - font-size: 33px; - line-height: 40px; - - svg { - vertical-align: text-bottom; - } - } - - h2 { - font-size: 23px; - line-height: 33px; - margin: 0 0 1rem; - - .highlight-mark { - background: transparent; - color: @blue-dark; - background: -moz-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -webkit-gradient( - linear, - left top, - left bottom, - color-stop(0%, transparent), - color-stop(60%, transparent), - color-stop(60%, fade(@blue-electric, 15%)), - color-stop(100%, fade(@blue-electric, 15%)) - ); - background: -webkit-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -o-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: -ms-linear-gradient( - top, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - background: linear-gradient( - to bottom, - transparent 0%, - transparent 60%, - fade(@blue-electric, 15%) 60%, - fade(@blue-electric, 15%) 100% - ); - filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='transparent', endColorstr='fade(@blue-electric, 15%)',GradientType=0 ); - padding: 0 3px; - margin: 0 0.1em 0 0; - } - } - - .telepresence-choice { - background: @white; - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - - strong { - color: @blue; - } - } - - .telepresence-choice-wrapper { - border-bottom: solid 1px @grey-separator; - column-gap: 60px; - display: inline-grid; - grid-template-columns: repeat(2, 1fr); - margin: 20px 0 50px; - padding: 0 0 62px; - width: 100%; - - .telepresence-choice { - ol { - li { - font-size: 14px; - } - } - - .get-started-button { - background-color: @green; - border-radius: 5px; - color: @white; - display: inline-flex; - font-style: normal; - font-weight: 600; - font-size: 14px; - line-height: 24px; - margin: 0 0 15px 5px; - padding: 13px 20px; - align-items: center; - letter-spacing: 1.25px; - text-decoration: none; - text-transform: uppercase; - transition: background-color 200ms linear 0ms; - - svg { - fill: @white; - height: 20px; - width: 20px; - } - - &:hover { - background-color: @green-dark; - text-decoration: none; - } - } - - p { - font-style: normal; - font-weight: normal; - font-size: 16px; - line-height: 26px; - letter-spacing: 0.5px; - } - } - } - - .video-wrapper { - display: flex; - flex-direction: row; - - ul { - li { - font-size: 14px; - margin: 0 10px 10px 0; - } - } - - div { - &.video-container { - flex: 1 1 70%; - position: relative; - width: 100%; - padding-bottom: 39.375%; - - .video { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; - } - } - - 
&.description { - flex: 0 1 30%; - } - } - } -} diff --git a/docs/v2.3/redirects.yml b/docs/v2.3/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/v2.3/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/v2.3/reference/architecture.md b/docs/v2.3/reference/architecture.md deleted file mode 100644 index 47facb0b..00000000 --- a/docs/v2.3/reference/architecture.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: "How Telepresence works to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `kubectl describe -pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. diff --git a/docs/v2.3/reference/client.md b/docs/v2.3/reference/client.md deleted file mode 100644 index 93023663..00000000 --- a/docs/v2.3/reference/client.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." 
---- - -# Client reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | -| [`login`](login) | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out out of Ambassador Cloud | -| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `preview` | Create or remove [preview URLs](../../howtos/preview-urls) for existing intercepts: `telepresence preview create ` | -| `status` | Shows the current connectivity status | -| `quit` | Quits the local daemon, stopping all intercepts and outbound traffic to the cluster| -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000`. A special flag `--docker-run` can be used to run the local instance [in a docker container](../docker-run). | -| `leave` | Stops an active intercept: `telepresence leave hello` | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. diff --git a/docs/v2.3/reference/client/login.md b/docs/v2.3/reference/client/login.md deleted file mode 100644 index 269a240d..00000000 --- a/docs/v2.3/reference/client/login.md +++ /dev/null @@ -1,53 +0,0 @@ -# Telepresence Login - -```console -$ telepresence login --help -Authenticate to Ambassador Cloud - -Usage: - telepresence login [flags] - -Flags: - --apikey string Static API key to use instead of performing an interactive login -``` - -## Description - -Use `telepresence login` to explicitly authenticate with [Ambassador -Cloud](https://www.getambassador.io/docs/cloud). Unless the -[`skipLogin` option](../../config) is set, other commands will -automatically invoke the `telepresence login` interactive login -procedure as nescessary, so it is rarely nescessary to explicitly run -`telepresence login`; it should only be truly nescessary to explictly -run `telepresence login` when you require a non-interactive login. - -The normal interactive login procedure involves launching a web -browser, a user interacting with that web browser, and finally having -the web browser make callbacks to the local Telepresence process. 
If -it is not possible to do this (perhaps you are using a headless remote -box via SSH, or are using Telepresence in CI), then you may instead -have Ambassador Cloud issue an API key that you pass to `telepresence -login` with the `--apikey` flag. - -## Acquiring an API key - -1. Log in to Ambassador Cloud at https://app.getambassador.io/ . - -2. Click on your profile icon in the upper-left: ![Screenshot with the - mouse pointer over the upper-left profile icon](./apikey-2.png) - -3. Click on the "API Keys" menu button: ![Screenshot with the mouse - pointer over the "API Keys" menu button](./apikey-3.png) - -4. Click on the "generate new key" button in the upper-right: - ![Screenshot with the mouse pointer over the "generate new key" - button](./apikey-4.png) - -5. Enter a description for the key (perhaps the name of your laptop, - or perhaps the "CI"), and click "generate api key" to create it. - -You may now pass the API key as `KEY` to `telepresence login --apikey=KEY`. - -Telepresence will use that "master" API key to create narrower keys -for different components of Telepresence. You will see these appear -in the Ambassador Cloud web interface. diff --git a/docs/v2.3/reference/client/login/apikey-2.png b/docs/v2.3/reference/client/login/apikey-2.png deleted file mode 100644 index 1379502a..00000000 Binary files a/docs/v2.3/reference/client/login/apikey-2.png and /dev/null differ diff --git a/docs/v2.3/reference/client/login/apikey-3.png b/docs/v2.3/reference/client/login/apikey-3.png deleted file mode 100644 index 4559b784..00000000 Binary files a/docs/v2.3/reference/client/login/apikey-3.png and /dev/null differ diff --git a/docs/v2.3/reference/client/login/apikey-4.png b/docs/v2.3/reference/client/login/apikey-4.png deleted file mode 100644 index 25c6581a..00000000 Binary files a/docs/v2.3/reference/client/login/apikey-4.png and /dev/null differ diff --git a/docs/v2.3/reference/cluster-config.md b/docs/v2.3/reference/cluster-config.md deleted file mode 100644 index 22262dc1..00000000 --- a/docs/v2.3/reference/cluster-config.md +++ /dev/null @@ -1,184 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; -import { ClusterConfig } from '@src/components/Docs/Telepresence'; - -# Cluster-side configuration - -For the most part, Telepresence doesn't require any special -configuration in the cluster and can be used right away in any -cluster (as long as the user has adequate [RBAC permissions](../rbac)). - -However, some advanced features do require some configuration in the -cluster. - -## TLS - -In this example, other applications in the cluster expect to speak TLS to your -intercepted application (perhaps you're using a service-mesh that does -mTLS). - -In order to use `--mechanism=http` (or any features that imply -`--mechanism=http`) you need to tell Telepresence about the TLS -certificates in use. 
- -Tell Telepresence about the certificates in use by adjusting your -[workload's](../intercepts/#supported-workloads) Pod template to set a couple of -annotations on the intercepted Pods: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ "getambassador.io/inject-terminating-tls-secret": "your-terminating-secret" # optional -+ "getambassador.io/inject-originating-tls-secret": "your-originating-secret" # optional - spec: -+ serviceAccountName: "your-account-that-has-rbac-to-read-those-secrets" - containers: -``` - -- The `getambassador.io/inject-terminating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS server - certificate to use for decrypting and responding to incoming - requests. - - When Telepresence modifies the Service and workload port - definitions to point at the Telepresence Agent sidecar's port - instead of your application's actual port, the sidecar will use this - certificate to terminate TLS. - -- The `getambassador.io/inject-originating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS - client certificate to use for communicating with your application. - - You will need to set this if your application expects incoming - requests to speak TLS (for example, your - code expects to handle mTLS itself instead of letting a service-mesh - sidecar handle mTLS for it, or the port definition that Telepresence - modified pointed at the service-mesh sidecar instead of at your - application). - - If you do set this, you should to set it to the - same client certificate Secret that you configure the Ambassador - Edge Stack to use for mTLS. - -It is only possible to refer to a Secret that is in the same Namespace -as the Pod. - -The Pod will need to have permission to `get` and `watch` each of -those Secrets. - -Telepresence understands `type: kubernetes.io/tls` Secrets and -`type: istio.io/key-and-cert` Secrets; as well as `type: Opaque` -Secrets that it detects to be formatted as one of those types. - -## Air gapped cluster - -If your cluster is air gapped (it does not have access to the -internet and therefore cannot connect to Ambassador Cloud), some additional -configuration is required to acquire a license use selective intercepts. - -### Create a license - -1. - -2. Generate a new license (if one doesn't already exist) by clicking *Generate New License*. - -3. You will be prompted for your Cluster ID. Ensure your -kubeconfig context is using the cluster you want to create a license for then -run this command to generate the Cluster ID: - - ``` - $ telepresence current-cluster-id - - Cluster ID: - ``` - -4. Click *Generate API Key* to finish generating the license. - -### Add license to cluster - -1. On the licenses page, download the license file associated with your cluster. - -2. Use this command to generate a Kubernetes Secret config using the license file: - - ``` - $ telepresence license -f - - apiVersion: v1 - data: - hostDomain: - license: - kind: Secret - metadata: - creationTimestamp: null - name: systema-license - namespace: ambassador - ``` - -3. Save the output as a YAML file and apply it to your -cluster with `kubectl`. - -4. Ensure that you have the docker image for the Smart Agent (datawire/ambassador-telepresence-agent:1.8.0) -pulled and in a registry your cluster can pull from. - -5. Have users use the `images` [config key](../config/#images) keys so telepresence uses the aforementioned image for their agent. 
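As a rough illustration of that last step, a minimal client-side `config.yml` could look like the sketch below. The registry shown is a placeholder; substitute one your workstations and cluster can actually pull from, and note that on macOS the file lives at `$HOME/Library/Application Support/telepresence/config.yml` instead.

```shell
# Illustrative only: write a minimal per-user config for an air-gapped client (Linux path shown).
mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/telepresence"
cat <<'EOF' > "${XDG_CONFIG_HOME:-$HOME/.config}/telepresence/config.yml"
cloud:
  skipLogin: true
images:
  agentImage: registry.example.com/ambassador-telepresence-agent:1.8.0
EOF
```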
- -Users will now be able to use selective intercepts with the -`--preview-url=false` flag (since use of preview URLs requires a connection to Ambassador Cloud). - -If using Helm to install the server-side components, see the chart's [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence) to learn how to configure the image registry and license secret. - -Have clients use the [skipLogin](../config/#cloud) key to ensure the cli knows it is operating in an -air-gapped environment. - -## Mutating Webhook - -By default, Telepresence updates the intercepted workload (Deployment, StatefulSet, ReplicaSet) -template to add the [Traffic Agent](../architecture/#traffic-agent) sidecar container and update the -port definitions. If you use GitOps workflows (with tools like ArgoCD) to automatically update your -cluster so that it reflects the desired state from an external Git repository, this behavior can make -your workload out of sync with that external desired state. - -To solve this issue, you can use Telepresence's Mutating Webhook alternative mechanism. Intercepted -workloads will then stay untouched and only the underlying pods will be modified to inject the Traffic -Agent sidecar container and update the port definitions. - - -A current limitation of the Mutating Webhook mechanism is that the targetPort of your intercepted -Service needs to point to the name of a port on your container, not the port number itself. - - -Simply add the `telepresence.getambassador.io/inject-traffic-agent: enabled` annotation to your -workload template's annotations: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ telepresence.getambassador.io/inject-traffic-agent: enabled - spec: - containers: -``` - -### Service Port Annotation - -A service port annotation can be added to the workload to make the Mutating Webhook select a specific port -in the service. This is necessary when the service has multiple ports. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-port: https - spec: - containers: -``` diff --git a/docs/v2.3/reference/config.md b/docs/v2.3/reference/config.md deleted file mode 100644 index e6b3ccb7..00000000 --- a/docs/v2.3/reference/config.md +++ /dev/null @@ -1,182 +0,0 @@ -# Laptop-side configuration - -## Global Configuration -Telepresence uses a `config.yml` file to store and change certain global configuration values that will be used for all clusters you use Telepresence with. The location of this file varies based on your OS: - -* macOS: `$HOME/Library/Application Support/telepresence/config.yml` -* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` - -For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence. - -### Values - -The config file currently supports values for the `timeouts`, `logLevels`, `images`, `cloud`, and `grpc` keys. 
- -Here is an example configuration to show you the conventions of how Telepresence is configured: -**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist** - -```yaml -timeouts: - agentInstall: 1m - intercept: 10s -logLevels: - userDaemon: debug -images: - registry: privateRepo # This overrides the default docker.io/datawire repo - agentImage: ambassador-telepresence-agent:1.8.0 # This overrides the agent image to inject when intercepting -grpc: - maxReceiveSize: 10Mi -``` - -#### Timeouts -Values for `timeouts` are all durations either as a number respresenting seconds or a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings can be fractional (`1.5h`) or combined (`2h45m`). - -These are the valid fields for the `timeouts` key: - -|Field|Description|Default| -|---|---|---| -|`agentInstall`|Waiting for Traffic Agent to be installed|2 minutes| -|`apply`|Waiting for a Kubernetes manifest to be applied|1 minute| -|`clusterConnect`|Waiting for cluster to be connected|20 seconds| -|`intercept`|Waiting for an intercept to become active|5 seconds| -|`proxyDial`|Waiting for an outbound connection to be established|5 seconds| -|`trafficManagerConnect`|Waiting for the Traffic Manager API to connect for port fowards|20 seconds| -|`trafficManagerAPI`|Waiting for connection to the gPRC API after `trafficManagerConnect` is successful|15 seconds| - -#### Log Levels -Values for `logLevels` are one of the following strings: `trace`, `debug`, `info`, `warning`, `error`, `fatal` and `panic`. -These are the valid fields for the `logLevels` key: - -|Field|Description|Default| -|---|---|---| -|`userDaemon`|Logging level to be used by the User Daemon (logs to connector.log)|debug| -|`rootDaemon`|Logging level to be used for the Root Daemon (logs to daemon.log)|info| - -#### Images -Values for `images` are strings. These values affect the objects that are deployed in the cluster, -so it's important to ensure users have the same configuration. - -Additionally, you can deploy the server-side components with [Helm](../../install/helm), to prevent them -from being overridden by a client's config and use the [mutating-webhook](../cluster-config/#mutating-webhook) -to handle installation of the `traffic-agents`. - -These are the valid fields for the `images` key: - -|Field|Description|Default| -|---|---|---| -|`registry`|Docker registry to be used for installing the Traffic Manager and default Traffic Agent. If not using a helm chart to deploy server-side objects, changing this value will create a new traffic-manager deployment when using Telepresence commands. Additionally, changing this value will update installed default `traffic-agents` to use the new registry when creating a new intercept.|docker.io/datawire| -|`agentImage`|$registry/$imageName:$imageTag to use when installing the Traffic Agent. Changing this value will update pre-existing `traffic-agents` to use this new image. 
* the `registry` value is not used for the `traffic-agent` if you have this value set *|| -|`webhookRegistry`|The container $registry that the [Traffic Manager](../cluster-config/#mutating-webhook) will use with the `webhookAgentImage` *This value is only used if a new traffic-manager is deployed*|| -|`webhookAgentImage`|The container image that the [Traffic Manager](../cluster-config/#mutating-webhook) will use when installing the Traffic Agent in annotated pods *This value is only used if a new traffic-manager is deployed*|| - -#### Cloud -These fields control how the client interacts with the Cloud service. -Currently there is only one key and it accepts bools: `1`, `t`, `T`, `TRUE`, `true`, `True`, `0`, `f`, `F,` `FALSE` - -|Field|Description|Default| -|---|---|---| -|`skipLogin`|Whether the cli should skip automatic login to Ambassador Cloud. If set to true, you must have a [license](../cluster-config/#air-gapped-cluster) installed in the cluster in order to be able to perform selective intercepts |false| - -Telepresence attempts to auto-detect if the cluster is air-gapped, -be sure to set the `skipLogin` value to `true` - -Reminder: To use selective intercepts, which normally require a login, you -must have a license in your cluster and specify which agentImage should be installed, -by also adding the following to your config.yml: - ``` - images: - agentImage: / - ``` - -#### Grpc -The `maxReceiveSize` determines how large a message that the workstation receives via gRPC can be. The default is 4Mi (determined by gRPC). All traffic to and from the cluster is tunneled via gRPC. - -The size is measured in bytes. You can express it as a plain integer or as a fixed-point number using E, G, M, or K. You can also use the power-of-two equivalents: Gi, Mi, Ki. For example, the following represent roughly the same value: -``` -128974848, 129e6, 129M, 123Mi -``` - -## Per-Cluster Configuration -Some configuration is not global to Telepresence and is actually specific to a cluster. Thus, we store that config information in your kubeconfig file, so that it is easier to maintain per-cluster configuration. - -### Values -The current per-cluster configuration supports `dns`, `alsoProxy`, and `manager` keys. -To add configuration, simply add a `telepresence.io` entry to the cluster in your kubeconfig like so: - -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - also-proxy: - manager: - name: example-cluster -``` -#### DNS -The fields for `dns` are: local-ip, remote-ip, exclude-suffixes, include-suffixes, and lookup-timeout. - -|Field|Description|Type|Default| -|---|---|---|---| -|`local-ip`|The address of the local DNS server. This entry is only used on Linux system that are not configured to use systemd.resolved|ip|first line of /etc/resolv.conf| -|`remote-ip`|the address of the cluster's DNS service|ip|IP of the kube-dns.kube-system or the dns-default.openshift-dns service| -|`exclude-suffixes`|suffixes for which the DNS resolver will always fail (or fallback in case of the overriding resolver)|list|| -|`include-suffixes`|suffixes for which the DNS resolver will always attempt to do a lookup. 
Includes have higher priority than excludes.|list|| -|`lookup-timeout`|maximum time to wait for a cluster side host lookup|duration|| - -Here is an example kubeconfig: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - include-suffixes: - - .se - exclude-suffixes: - - .com - name: example-cluster -``` - - -#### AlsoProxy -When using `also-proxy`, you provide a list of subnets after the key in your kubeconfig file to be added to the TUN device. All connections to addresses that the subnet spans will be dispatched to the cluster - -Here is an example kubeconfig for the subnet `1.2.3.4/32`: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - also-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -#### Manager - -The `manager` key contains configuration for finding the `traffic-manager` that telepresence will connect to. It supports one key, `namespace`, indicating the namespace where the traffic manager is to be found - -Here is an example kubeconfig that will instruct telepresence to connect to a manager in namespace `staging`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` diff --git a/docs/v2.3/reference/docker-run.md b/docs/v2.3/reference/docker-run.md deleted file mode 100644 index 2262f0a5..00000000 --- a/docs/v2.3/reference/docker-run.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -Description: "How a Telepresence intercept can run a Docker container with configured environment and volume mounts." ---- - -# Using Docker for intercepts - -If you want your intercept to go to a Docker container on your laptop, use the `--docker-run` option. It creates the intercept, runs your container in the foreground, then automatically ends the intercept when the container exits. - -`telepresence intercept --port --docker-run -- ` - -The `--` separates flags intended for `telepresence intercept` from flags intended for `docker run`. - -## Example - -Imagine you are working on a new version of a your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container. - -`telepresence intercept frontend-v1 --port 8000 --docker-run -- frontend-v2` - -## Ports - -The `--port` flag can specify an additional port when `--docker-run` is used so that the local and container port can be different. This is done using `--port :`. The container port will default to the local port when using the `--port ` syntax. - -## Flags - -Telepresence will automatically pass some relevant flags to Docker in order to connect the container with the intercept. Those flags are combined with the arguments given after `--` on the command line. 
- -- `--dns-search tel2-search` Enables single label name lookups in intercepted namespaces -- `--env-file ` Loads the intercepted environment -- `--name intercept--` Names the Docker container, this flag is omitted if explicitly given on the command line -- `-p ` The local port for the intercept and the container port -- `-v ` Volume mount specification, see CLI help for `--mount` and `--docker-mount` flags for more info diff --git a/docs/v2.3/reference/environment.md b/docs/v2.3/reference/environment.md deleted file mode 100644 index b5a799cc..00000000 --- a/docs/v2.3/reference/environment.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." ---- - -# Environment variables - -Telepresence can import environment variables from the cluster pod when running an intercept. -You can then use these variables with the code running on your laptop of the service being intercepted. - -There are three options available to do this: - -1. `telepresence intercept [service] --port [port] --env-file=FILENAME` - - This will write the environment variables to a Docker Compose `.env` file. This file can be used with `docker-compose` when starting containers locally. Please see the Docker documentation regarding the [file syntax](https://docs.docker.com/compose/env-file/) and [usage](https://docs.docker.com/compose/environment-variables/) for more information. - -2. `telepresence intercept [service] --port [port] --env-json=FILENAME` - - This will write the environment variables to a JSON file. This file can be injected into other build processes. - -3. `telepresence intercept [service] --port [port] -- [COMMAND]` - - This will run a command locally with the pod's environment variables set on your laptop. Once the command quits the intercept is stopped (as if `telepresence leave [service]` was run). This can be used in conjunction with a local server command, such as `python [FILENAME]` or `node [FILENAME]` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. - - Another use would be running a subshell, Bash for example: - - `telepresence intercept [service] --port [port] -- /bin/bash` - - This would start the intercept then launch the subshell on your laptop with all the same variables set as on the pod. diff --git a/docs/v2.3/reference/inside-container.md b/docs/v2.3/reference/inside-container.md deleted file mode 100644 index f83ef357..00000000 --- a/docs/v2.3/reference/inside-container.md +++ /dev/null @@ -1,37 +0,0 @@ -# Running Telepresence inside a container - -It is sometimes desirable to run Telepresence inside a container. One reason can be to avoid any side effects on the workstation's network, another can be to establish multiple sessions with the traffic manager, or even work with different clusters simultaneously. - -## Building the container - -Building a container with a ready-to-run Telepresence is easy because there are relatively few external dependencies. 
Add the following to a `Dockerfile`: - -```Dockerfile -# Dockerfile with telepresence and its prerequisites -FROM alpine:3.13 - -# Install Telepresence prerequisites -RUN apk add --no-cache curl iproute2 sshfs - -# Download and install the telepresence binary -RUN curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence -o telepresence && \ - install -o root -g root -m 0755 telepresence /usr/local/bin/telepresence -``` -In order to build the container, do this in the same directory as the `Dockerfile`: -``` -$ docker build -t tp-in-docker . -``` - -## Running the container - -Telepresence will need access to the `/dev/net/tun` device on your Linux host (or, in case the host isn't Linux, the Linux VM that Docker starts automatically), and a Kubernetes config that identifies the cluster. It will also need `--cap-add=NET_ADMIN` to create its Virtual Network Interface. - -The command to run the container can look like this: -```bash -$ docker run \ - --cap-add=NET_ADMIN \ - --device /dev/net/tun:/dev/net/tun \ - --network=host \ - -v ~/.kube/config:/root/.kube/config \ - -it --rm tp-in-docker -``` diff --git a/docs/v2.3/reference/intercepts.md b/docs/v2.3/reference/intercepts.md deleted file mode 100644 index ef484353..00000000 --- a/docs/v2.3/reference/intercepts.md +++ /dev/null @@ -1,170 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Intercepts - -## Intercept behavior when logged in to Ambassador Cloud - -After logging in to Ambassador Cloud (with [`telepresence -login`](../client/login/)), Telepresence will default to -`--preview-url=true`, which will use Ambassador Cloud to create a -sharable preview URL for this intercept. (Creating an intercept -without logging in defaults to `--preview-url=false`.) - -In order to do this, it will prompt you for four options. For the first, `Ingress`, Telepresence tries to intelligently determine the ingress controller deployment and namespace for you. If they are correct, you can hit `enter` to accept the defaults. Set the next two options, `TLS` and `Port`, appropriately based on your ingress service. The fourth is a hostname for the service, if required by your ingress. - -Also because you're logged in, Telepresence will default to `--mechanism=http --http-match=auto` (or just `--http-match=auto`; `--http-match` implies `--mechanism=http`). If you hadn't been logged in it would have defaulted to `--mechanism=tcp`. This tells it to do smart intercepts and only intercept a subset of HTTP requests, rather than just intercepting the entirety of all TCP connections. This is important for working in a shared cluster with teammates, and is important for the preview URL functionality. See `telepresence intercept --help` for information on using `--http-match` to customize which requests it intercepts. - -## Supported workloads - -Kubernetes has various [workloads](https://kubernetes.io/docs/concepts/workloads/). Currently, telepresence supports intercepting Deployments, ReplicaSets, and StatefulSets. - While many of our examples may use Deployments, they would also work on ReplicaSets and StatefulSets - -## Specifying a namespace for an intercept - -The namespace of the intercepted workload is specified using the `--namespace` option. When this option is used, and `--workload` is not used, then the given name is interpreted as the name of the workload and the name of the intercept will be constructed from that name and the namespace. 
- -``` -telepresence intercept hello --namespace myns --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept -`hello-myns`. In order to remove the intercept, you will need to run -`telepresence leave hello-myns` instead of just `telepresence leave -hello`. - -The name of the intercept will be left unchanged if the workload is specified. - -``` -telepresence intercept myhello --namespace myns --workload hello --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept `myhello`. - -## Importing environment variables - -Telepresence can import the environment variables from the pod that is being intercepted; see [this doc](../environment/) for more details. - -## Creating an intercept without a local process running - -When creating an intercept that is selective (the default if you are -logged in to Ambassador Cloud), the Traffic Agent sends a GET `/` -request to your service and the process running on your local machine -at the port specified in your intercept to determine if they support -HTTP/2. This is required for selective intercepts to behave correctly. - -If you do not have a service running locally, the Traffic Agent will use the result -it gets from the HTTP check against your app in the cluster to configure requests -from the local process once it has started. - -## Creating an intercept without a preview URL - -If you *are not* logged in to Ambassador Cloud, the following command -will intercept all traffic bound to the service and proxy it to your -laptop. This includes traffic coming through your ingress controller, -so use this option carefully so as not to disrupt production -environments. - -``` -telepresence intercept --port= -``` - -If you *are* logged in to Ambassador Cloud, setting the -`--preview-url` flag to `false` is necessary. - -``` -telepresence intercept --port= --preview-url=false -``` - -This will output a header that you can set on your request for that traffic to be intercepted: - -``` -$ telepresence intercept --port= --preview-url=false -Using Deployment -intercepted - Intercept name: - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -``` -$ telepresence status -Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" -User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 1 total - dataprocessingnodeservice: @ -``` - -Finally, run `telepresence leave ` to stop the intercept. - -## Creating an intercept when a service has multiple ports - -If you are trying to intercept a service that has multiple ports, you need to tell Telepresence which service port you are trying to intercept. To specify, you can either use the name of the service port or the port number itself. To see which options might be available to you and your service, use `kubectl` to describe your service or look in the object's YAML. For more information on multiple ports, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services).
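For example, one way to see a service's port names and numbers before picking one is to inspect the service with `kubectl` (the service name `myservice` and its ports below are hypothetical):

```
$ kubectl get service myservice --output=yaml
...
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
  - name: grpc
    port: 9090
    protocol: TCP
    targetPort: 9090
...
```

Either a port name (`http`, `grpc`) or a port number from this list can then be supplied to the `--port` flag.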
- -``` -$ telepresence intercept --port=: -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -When intercepting a service that has multiple ports, the name of the service port that has been intercepted is also listed. - -If you want to change which port has been intercepted, you can create a new intercept the same way you did above and it will change which service port is being intercepted. - -## Creating an intercept When multiple services match your workload - -Oftentimes, there's a 1-to-1 relationship between a service and a workload, so telepresence is able to auto-detect which service it should intercept based on the workload you are trying to intercept. But if you use something like [Argo](https://www.getambassador.io/docs/argo/latest/), it uses two services (that use the same labels) to manage traffic between a canary and a stable service. - -Fortunately, if you know which service you want to use when intercepting a workload, you can use the --service flag. So in the aforementioned demo, if you wanted to use the `echo-stable` service when intercepting your workload, your command would look like this: -``` -$ telepresence intercept echo-rollout- --port --service echo-stable -Using ReplicaSet echo-rollout- -intercepted - Intercept name : echo-rollout- - State : ACTIVE - Workload kind : ReplicaSet - Destination : 127.0.0.1:3000 - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 - Intercepting : all TCP connections -``` - -## Port-forwarding an intercepted container's sidecars - -Sidecars are containers that sit in the same pod as an application container; they usually provide auxiliary functionality to an application, and can usually be reached at `localhost:${SIDECAR_PORT}`. -For example, a common use case for a sidecar is to proxy requests to a database -- your application would connect to `localhost:${SIDECAR_PORT}`, and the sidecar would then connect to the database, perhaps augmenting the connection with TLS or authentication. - -When intercepting a container that uses sidecars, you might want those sidecars' ports to be available to your local application at `localhost:${SIDECAR_PORT}`, exactly as they would be if running in-cluster. -Telepresence's `--to-pod ${PORT}` flag implements this behavior, adding port-forwards for the port given. - -``` -$ telepresence intercept --port=: --to-pod= -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -If there are multiple ports that you need forwarded, simply repeat the flag (`--to-pod= --to-pod=`). diff --git a/docs/v2.3/reference/linkerd.md b/docs/v2.3/reference/linkerd.md deleted file mode 100644 index 9b903fa7..00000000 --- a/docs/v2.3/reference/linkerd.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -Description: "How to get Linkerd meshed services working with Telepresence" ---- - -# Using Telepresence with Linkerd - -## Introduction -Getting started with Telepresence on Linkerd services is as simple as adding an annotation to your Deployment: - -```yaml -spec: - template: - metadata: - annotations: - config.linkerd.io/skip-outbound-ports: "8081" -``` - -The local system and the Traffic Agent connect to the Traffic Manager using its gRPC API on port 8081. 
Telling Linkerd to skip that port allows the Traffic Agent sidecar to fully communicate with the Traffic Manager, and therefore the rest of the Telepresence system. - -## Prerequisites -1. [Telepresence binary](../../install) -2. Linkerd control plane [installed to cluster](https://linkerd.io/2.10/tasks/install/) -3. Kubectl -4. [Working ingress controller](https://www.getambassador.io/docs/edge-stack/latest/howtos/linkerd2) - -## Deploy -Save and deploy the following YAML. Note the `config.linkerd.io/skip-outbound-ports` annotation in the metadata of the pod template. - -```yaml ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: quote -spec: - replicas: 1 - selector: - matchLabels: - app: quote - strategy: - type: RollingUpdate - template: - metadata: - annotations: - linkerd.io/inject: "enabled" - config.linkerd.io/skip-outbound-ports: "8081,8022,6001" - labels: - app: quote - spec: - containers: - - name: backend - image: docker.io/datawire/quote:0.4.1 - ports: - - name: http - containerPort: 8000 - env: - - name: PORT - value: "8000" - resources: - limits: - cpu: "0.1" - memory: 100Mi -``` - -## Connect to Telepresence -Run `telepresence connect` to connect to the cluster. Then `telepresence list` should show the `quote` deployment as `ready to intercept`: - -``` -$ telepresence list - - quote: ready to intercept (traffic-agent not yet installed) -``` - -## Run the intercept -Run `telepresence intercept quote --port 8080:80` to direct traffic from the `quote` deployment to port 8080 on your local system. Assuming you have something listening on 8080, you should now be able to see your local service whenever attempting to access the `quote` service. diff --git a/docs/v2.3/reference/rbac.md b/docs/v2.3/reference/rbac.md deleted file mode 100644 index 4facd8b5..00000000 --- a/docs/v2.3/reference/rbac.md +++ /dev/null @@ -1,211 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Telepresence RBAC -The intention of this document is to provide a template for securing and limiting the permissions of Telepresence. -This documentation covers the full extent of permissions necessary to administrate Telepresence components in a cluster. - -There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described above. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [intercept](../../howtos/intercepts/), and otherwise be unable to affect Kubernetes resources. - -In addition to the above, there is also a consideration of how to manage Users and Groups in Kubernetes which is outside of the scope of the document. This document will use Service Accounts to assign Roles and Bindings. Other methods of RBAC administration and enforcement can be found on the [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) page. - -## Requirements - -- Kubernetes version 1.16+ -- Cluster admin privileges to apply RBAC - -## Editing your kubeconfig - -This guide also assumes that you are utilizing a kubeconfig file that is specified by the `KUBECONFIG` environment variable. This is a `yaml` file that contains the cluster's API endpoint information as well as the user data being supplied for authentication. The Service Account name used in the example below is called tp-user. This can be replaced by any value (i.e. John or Jane) as long as references to the Service Account are consistent throughout the `yaml`. 
After an administrator has applied the RBAC configuration, a user should create a `config.yaml` in your current directory that looks like the following:​ - -```yaml -apiVersion: v1 -kind: Config -clusters: -- name: my-cluster # Must match the cluster value in the contexts config - cluster: - ## The cluster field is highly cloud dependent. -contexts: -- name: my-context - context: - cluster: my-cluster # Must match the name field in the clusters config - user: tp-user -users: -- name: tp-user # Must match the name of the Service Account created by the cluster admin - user: - token: # See note below -``` - -The Service Account token will be obtained by the cluster administrator after they create the user's Service Account. Creating the Service Account will create an associated Secret in the same namespace with the format `-token-`. This token can be obtained by your cluster administrator by running `kubectl get secret -n ambassador -o jsonpath='{.data.token}' | base64 -d`. - -After creating `config.yaml` in your current directory, export the file's location to KUBECONFIG by running `export KUBECONFIG=$(pwd)/config.yaml`. You should then be able to switch to this context by running `kubectl config use-context my-context`. - -## Administrating Telepresence - -[Telepresence administration](/products/telepresence/) requires permissions for creating `Namespaces`, `ServiceAccounts`, `ClusterRoles`, `ClusterRoleBindings`, `Secrets`, `Services`, `MutatingWebhookConfiguration`, and for creating the `traffic-manager` [deployment](../architecture/#traffic-manager) which is typically done by a full cluster administrator. - -There are two ways to install the traffic-manager: Using `telepresence connect` and installing the [helm chart](../../install/helm/). - -By using `telepresence connect`, Telepresence will use your kubeconfig to create the objects mentioned above in the cluster if they don't already exist. If you want the most introspection into what is being installed, we recommend using the helm chart to install the traffic-manager. - -## Cluster-wide telepresence user access - -To allow users to make intercepts across all namespaces, but with more limited `kubectl` permissions, the following `ServiceAccount`, `ClusterRole`, and `ClusterRoleBinding` will allow full `telepresence intercept` functionality. 
- -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate value - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch", "update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: telepresence-rolebinding -subjects: -- name: tp-user - kind: ServiceAccount - namespace: ambassador -roleRef: - apiGroup: rbac.authorization.k8s.io - name: telepresence-role - kind: ClusterRole -``` - -## Namespace only telepresence user access - -RBAC for multi-tenant scenarios where multiple dev teams are sharing a single cluster where users are constrained to a specific namespace(s). - -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate user name - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] ---- -kind: RoleBinding # RBAC to access ambassador namespace -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: t2-ambassador-binding - namespace: ambassador -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding # RoleBinding T2 namespace to be intecpeted -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-test-binding # Update "test" for appropriate namespace to be intercepted - namespace: test # Update "test" for appropriate namespace to be intercepted -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io -​ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: 
telepresence-namespace-role -rules: -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-binding -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-namespace-role - apiGroup: rbac.authorization.k8s.io -``` diff --git a/docs/v2.3/reference/routing.md b/docs/v2.3/reference/routing.md deleted file mode 100644 index 75e36f00..00000000 --- a/docs/v2.3/reference/routing.md +++ /dev/null @@ -1,48 +0,0 @@ -# Connection Routing - -## Outbound - -### DNS resolution -When requesting a connection to a host, the IP of that host must be determined. Telepresence provides DNS resolvers to help with this task. There are currently three types of resolvers, but only one of them will be used on a workstation at any given time. Common to all of them is that they will propagate a selection of the host lookups to be performed in the cluster. The selection normally includes all names ending with `.cluster.local` or a currently mapped namespace, but more entries can be added to the list using the `include-suffixes` option in the -[local DNS configuration](../config/#dns). - -#### Cluster side DNS lookups -The cluster side host lookup will be performed by the traffic-manager unless the client has an active intercept, in which case the agent performing that intercept will be responsible for doing it. If the client has multiple intercepts, then all of them will be asked to perform the lookup, and the response to the client will contain the unique sum of IPs that they produce. It's therefore important to never have multiple intercepts that span more than one namespace[[1](#namespacelimit)]. The reason for asking all of them is that the workstation currently impersonates multiple containers, and it is not possible to determine on behalf of what container the lookup request is made. - -#### macOS resolver -This resolver hooks into the macOS DNS system by creating files under `/etc/resolver`. Those files correspond to some domain and contain the port number of the Telepresence resolver. Telepresence creates one such file for each of the currently mapped namespaces and for each entry in the `include-suffixes` option. The file `telepresence.local` contains a search path that is configured based on current intercepts so that single label names can be resolved correctly. - -#### Linux systemd-resolved resolver -This resolver registers itself as part of Telepresence's [VIF](../tun-device) using `systemd-resolved` and uses the DBus API to configure domains and routes that correspond to the current set of intercepts and namespaces. - -#### Linux overriding resolver -Linux systems that aren't configured with `systemd-resolved` will use this resolver. A typical case is when running Telepresence [inside a Docker container](../inside-container). During initialization, the resolver will first establish a _fallback_ connection to the IP passed as `--dns`, the one configured as `local-ip` in the [local DNS configuration](../config/#dns), or the primary `nameserver` registered in `/etc/resolv.conf`. It will then use iptables to actually override that IP so that requests to it instead end up in the overriding resolver, which, unless it succeeds on its own, will use the _fallback_.
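As a quick check of the cluster-side resolution described above, once connected you can resolve and reach an in-cluster service directly from the workstation (the service `myservice`, namespace `mynamespace`, and port 8080 below are hypothetical):

```
$ telepresence connect
$ nslookup myservice.mynamespace.svc.cluster.local
$ curl http://myservice.mynamespace.svc.cluster.local:8080/
```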
- -### Routing - -#### Subnets -The Telepresence `traffic-manager` service is responsible for discovering the cluster's Service subnet and all subnets used by the pods. In order to do this, it needs permission to create a dummy service[[2](#servicesubnet)] in its own namespace, and the ability to list, get, and watch nodes and pods. Some clusters will expose the pod subnets as `podCIDR` in the `Node`, but some, like Amazon EKS, typically don't. Telepresence will then fall back to deriving the subnets from the IPs of all pods. - -The complete set of subnets that the [VIF](../tun-device) will be configured with is dynamic and may change during a connection's life cycle as new nodes arrive or disappear from the cluster. The set consists of what the traffic-manager finds in the cluster, and the subnets configured using the [also-proxy](../config#alsoproxy) configuration option. Telepresence will remove subnets that are equal to, or completely covered by, other subnets. - -#### Connection origin -A request to connect to an IP address that belongs to one of the subnets of the [VIF](../tun-device) will cause a connection request to be made in the cluster. As with host name lookups, the request will originate from the traffic-manager unless the client has ongoing intercepts. If it does, one of the intercepted pods will be chosen, and the request will instead originate from that pod. This is a best-effort approach. Telepresence only knows that the request originated from the workstation. It cannot know that it is intended to originate from a specific pod when multiple intercepts are active. - -A `--local-only` intercept will not have any effect on the connection origin because there is no pod from which the connection can originate. The intercept must be made on a workload that has been deployed in the cluster if there's a requirement for correct connection origin. - -There are multiple reasons for doing this. One is that it is important that the request originates from the correct namespace. Example: - -```bash -curl some-host -``` -results in an HTTP request with the header `Host: some-host`. Now, if a service mesh like Istio performs header-based routing, then it will fail to find that host unless the request originates from the same namespace as the host resides in. Another reason is that the configuration of a service mesh can contain very strict rules. If the request then originates from the wrong pod, it will be denied. Only one intercept at a time can be used if there is a need to ensure that the chosen pod is exactly right. - -## Inbound - -The traffic-manager and traffic-agent are mutually responsible for setting up the necessary connection to the workstation when an intercept becomes active. In versions prior to 2.3.2, this would be accomplished by the traffic-manager creating a port dynamically that it would pass to the traffic-agent. The traffic-agent would then forward the intercepted connection to that port, and the traffic-manager would forward it to the workstation. This led to problems when integrating with service meshes like Istio since those dynamic ports needed to be configured. It also imposed an undesired requirement to be able to use mTLS between the traffic-manager and traffic-agent. - -In 2.3.2, this changed so that the traffic-agent instead creates a tunnel to the traffic-manager using the already existing gRPC API connection. The traffic-manager then forwards that using another tunnel to the workstation.
This is completely invisible to other service meshes and is therefore much easier to configure. - -##### Footnotes: -

1: A future version of Telepresence will not allow concurrent intercepts that span multiple namespaces.

-

2: The error message from an attempt to create a service in a bad subnet contains the service subnet. The trick of creating a dummy service is currently the only way to get Kubernetes to expose that subnet.
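For illustration, the same information can be surfaced manually by asking Kubernetes to create a service with a deliberately invalid cluster IP; the exact wording of the error and the reported range depend on the cluster (a minimal sketch):

```
$ kubectl create service clusterip dummy --clusterip=1.1.1.1 --tcp=80
The Service "dummy" is invalid: spec.clusterIP: Invalid value: "1.1.1.1":
  provided IP is not in the valid range. The range of valid IPs is 10.96.0.0/12
```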

diff --git a/docs/v2.3/reference/tun-device.md b/docs/v2.3/reference/tun-device.md deleted file mode 100644 index 4410f6f3..00000000 --- a/docs/v2.3/reference/tun-device.md +++ /dev/null @@ -1,27 +0,0 @@ -# Networking through Virtual Network Interface - -The Telepresence daemon process creates a Virtual Network Interface (VIF) when Telepresence connects to the cluster. The VIF ensures that the cluster's subnets are available to the workstation. It also intercepts DNS requests and forwards them to the traffic-manager, which in turn forwards them to intercepted agents, if any, or performs a host lookup by itself. - -### TUN-Device -The VIF is a TUN-device, which means that it communicates with the workstation in terms of L3 IP-packets. The router will recognize UDP and TCP packets and tunnel their payload to the traffic-manager via its encrypted gRPC API. The traffic-manager will then establish corresponding connections in the cluster. All protocol negotiation takes place in the client because the VIF takes care of the L3 to L4 translation (i.e. the tunnel is L4, not L3). - -## Gains when using the VIF - -### Both TCP and UDP -The TUN-device is capable of routing both TCP and UDP for outbound traffic. Earlier versions of Telepresence would only allow TCP. Future enhancements might be to also route inbound UDP, and perhaps a selection of ICMP packets (to allow for things like `ping`). - -### No SSH required - -The VIF approach is somewhat similar to using `sshuttle` but without -any requirements for extra software, configuration or connections. -Using the VIF means that only one single connection needs to be -forwarded through the Kubernetes apiserver (à la `kubectl -port-forward`), using only one single port. There is no need for -`ssh` in the client nor for `sshd` in the traffic-manager. This also -means that the traffic-manager container can run as the default user. - -#### sshfs without ssh encryption -When a Pod is intercepted, and its volumes are mounted on the local machine, this mount is performed by [sshfs](https://github.com/libfuse/sshfs). Telepresence will run `sshfs -o slave`, which means that instead of using `ssh` to establish an encrypted communication to an `sshd`, which in turn terminates the encryption and forwards to `sftp`, the `sshfs` will talk `sftp` directly on its `stdin/stdout` pair. Telepresence tunnels that directly to an `sftp` in the agent using its already encrypted gRPC API. As a result, no `sshd` is needed in the client or in the traffic-agent, and the traffic-agent container can run as the default user. - -### No Firewall rules -With the VIF in place, there's no longer any need to tamper with firewalls in order to establish IP routes. The VIF makes the cluster subnets available during connect, and the kernel will perform the routing automatically. When the session ends, the kernel is also responsible for cleaning up. diff --git a/docs/v2.3/reference/volume.md b/docs/v2.3/reference/volume.md deleted file mode 100644 index 2e0e8bc5..00000000 --- a/docs/v2.3/reference/volume.md +++ /dev/null @@ -1,36 +0,0 @@ -# Volume mounts - -import Alert from '@material-ui/lab/Alert'; - -Telepresence supports locally mounting volumes that are mounted to your Pods. You can specify a command to run when starting the intercept; this could be a subshell or a local server such as Python or Node.
- -``` -telepresence intercept --port --mount=/tmp/ -- /bin/bash -``` - -In this case, Telepresence creates the intercept, mounts the Pod's volumes locally to `/tmp`, and starts a Bash subshell. - -Telepresence can set a random mount point for you by using `--mount=true` instead; you can then find the mount point in the output of `telepresence list` or using the `$TELEPRESENCE_ROOT` variable. - -``` -$ telepresence intercept --port --mount=true -- /bin/bash -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 - Intercepting : all TCP connections - -bash-3.2$ echo $TELEPRESENCE_ROOT -/var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 -``` - -`--mount=true` is the default if a mount option is not specified; use `--mount=false` to disable mounting volumes. - -With either method, the code you run locally, either from the subshell or from the intercept command, will need to be prepended with the `$TELEPRESENCE_ROOT` environment variable to utilize the mounted volumes. - -For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`. - -If using `--mount=true` without a command, you can use either environment variable flag to retrieve the variable. diff --git a/docs/v2.3/release-notes/no-ssh.png b/docs/v2.3/release-notes/no-ssh.png deleted file mode 100644 index 025f20ab..00000000 Binary files a/docs/v2.3/release-notes/no-ssh.png and /dev/null differ diff --git a/docs/v2.3/release-notes/run-tp-in-docker.png b/docs/v2.3/release-notes/run-tp-in-docker.png deleted file mode 100644 index 53b66a9b..00000000 Binary files a/docs/v2.3/release-notes/run-tp-in-docker.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.2.png b/docs/v2.3/release-notes/telepresence-2.2.png deleted file mode 100644 index 43abc7e8..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.2.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.0-homebrew.png b/docs/v2.3/release-notes/telepresence-2.3.0-homebrew.png deleted file mode 100644 index e203a975..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.0-homebrew.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.0-loglevels.png b/docs/v2.3/release-notes/telepresence-2.3.0-loglevels.png deleted file mode 100644 index 3d628c54..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.0-loglevels.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.1-alsoProxy.png b/docs/v2.3/release-notes/telepresence-2.3.1-alsoProxy.png deleted file mode 100644 index 4052b927..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.1-alsoProxy.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.1-brew.png b/docs/v2.3/release-notes/telepresence-2.3.1-brew.png deleted file mode 100644 index 2af42490..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.1-brew.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.1-dns.png b/docs/v2.3/release-notes/telepresence-2.3.1-dns.png deleted file mode 100644 index c6335e7a..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.1-dns.png and /dev/null differ diff --git
a/docs/v2.3/release-notes/telepresence-2.3.1-inject.png b/docs/v2.3/release-notes/telepresence-2.3.1-inject.png deleted file mode 100644 index aea1003e..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.1-inject.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.1-large-file-transfer.png b/docs/v2.3/release-notes/telepresence-2.3.1-large-file-transfer.png deleted file mode 100644 index 48ceb381..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.1-large-file-transfer.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.1-trafficmanagerconnect.png b/docs/v2.3/release-notes/telepresence-2.3.1-trafficmanagerconnect.png deleted file mode 100644 index 78128c17..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.1-trafficmanagerconnect.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.2-subnets.png b/docs/v2.3/release-notes/telepresence-2.3.2-subnets.png deleted file mode 100644 index 778c722a..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.2-subnets.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.2-svcport-annotation.png b/docs/v2.3/release-notes/telepresence-2.3.2-svcport-annotation.png deleted file mode 100644 index 1e1e9240..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.2-svcport-annotation.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.3-helm.png b/docs/v2.3/release-notes/telepresence-2.3.3-helm.png deleted file mode 100644 index 7b81480a..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.3-helm.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.3-namespace-config.png b/docs/v2.3/release-notes/telepresence-2.3.3-namespace-config.png deleted file mode 100644 index 7864d3a3..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.3-namespace-config.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.3-to-pod.png b/docs/v2.3/release-notes/telepresence-2.3.3-to-pod.png deleted file mode 100644 index aa7be3f6..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.3-to-pod.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.4-improved-error.png b/docs/v2.3/release-notes/telepresence-2.3.4-improved-error.png deleted file mode 100644 index fa8a1298..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.4-improved-error.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.4-ip-error.png b/docs/v2.3/release-notes/telepresence-2.3.4-ip-error.png deleted file mode 100644 index 1d37380c..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.4-ip-error.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.5-agent-config.png b/docs/v2.3/release-notes/telepresence-2.3.5-agent-config.png deleted file mode 100644 index 67d6d3e8..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.5-agent-config.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.5-grpc-max-receive-size.png b/docs/v2.3/release-notes/telepresence-2.3.5-grpc-max-receive-size.png deleted file mode 100644 index 32939f9d..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.5-grpc-max-receive-size.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.5-skipLogin.png b/docs/v2.3/release-notes/telepresence-2.3.5-skipLogin.png deleted file mode 100644 index 
bf79c191..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.5-skipLogin.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png b/docs/v2.3/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png deleted file mode 100644 index d29a05ad..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.7-keydesc.png b/docs/v2.3/release-notes/telepresence-2.3.7-keydesc.png deleted file mode 100644 index 9bffe5cc..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.7-keydesc.png and /dev/null differ diff --git a/docs/v2.3/release-notes/telepresence-2.3.7-newkey.png b/docs/v2.3/release-notes/telepresence-2.3.7-newkey.png deleted file mode 100644 index c7d47c42..00000000 Binary files a/docs/v2.3/release-notes/telepresence-2.3.7-newkey.png and /dev/null differ diff --git a/docs/v2.3/release-notes/tunnel.jpg b/docs/v2.3/release-notes/tunnel.jpg deleted file mode 100644 index 59a0397e..00000000 Binary files a/docs/v2.3/release-notes/tunnel.jpg and /dev/null differ diff --git a/docs/v2.3/releaseNotes.yml b/docs/v2.3/releaseNotes.yml deleted file mode 100644 index 700272bf..00000000 --- a/docs/v2.3/releaseNotes.yml +++ /dev/null @@ -1,452 +0,0 @@ -# This file should be placed in the folder for the version of the -# product that's meant to be documented. A `/release-notes` page will -# be automatically generated and populated at build time. -# -# Note that an entry needs to be added to the `doc-links.yml` file in -# order to surface the release notes in the table of contents. -# -# The YAML in this file should contain: -# -# changelog: An (optional) URL to the CHANGELOG for the product. -# items: An array of releases with the following attributes: -# - version: The (optional) version number of the release, if applicable. -# - date: The date of the release in the format YYYY-MM-DD. -# - notes: An array of noteworthy changes included in the release, each having the following attributes: -# - type: The type of change, one of `bugfix`, `feature`, `security` or `change`. -# - title: A short title of the noteworthy change. -# - body: >- -# Two or three sentences describing the change and why it -# is noteworthy. This is HTML, not plain text or -# markdown. It is handy to use YAML's ">-" feature to -# allow line-wrapping. -# - image: >- -# The URL of an image that visually represents the -# noteworthy change. This path is relative to the -# `release-notes` directory; if this file is -# `FOO/releaseNotes.yml`, then the image paths are -# relative to `FOO/release-notes/`. -# - docs: The path to the documentation page where additional information can be found. -# - href: A path from the root to a resource on the getambassador website, takes precedence over a docs link. - -docTitle: Telepresence Release Notes -docDescription: >- - Release notes for Telepresence by Ambassador Labs, a CNCF project - that enables developers to iterate rapidly on Kubernetes - microservices by arming them with infinite-scale development - environments, access to instantaneous feedback loops, and highly - customizable development environments. 
- -changelog: https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md - -items: - - - version: 2.3.7 - date: '2021-07-23' - notes: - - - type: feature - title: Also-proxy in telepresence status - body: >- - An also-proxy entry in the Kubernetes cluster config will - show up in the output of the telepresence status command. - docs: reference/config - - - type: feature - title: Non-interactive telepresence login - body: >- - telepresence login now has an - --apikey=KEY flag that allows for - non-interactive logins. This is useful for headless - environments where launching a web-browser is impossible, - such as cloud shells, Docker containers, or CI. - image: telepresence-2.3.7-newkey.png - docs: reference/client/login/ - - - type: bugfix - title: Mutating webhook injector correctly hides named ports for probes. - body: >- - The mutating webhook injector has been fixed to correctly rename named ports for liveness and readiness probes - docs: reference/cluster-config - - - type: bugfix - title: telepresence current-cluster-id crash fixed - body: >- - Fixed a regression introduced in 2.3.5 that caused `telepresence current-cluster-id` - to crash. - docs: reference/cluster-config - - - type: bugfix - title: Better UX around intercepts with no local process running - body: >- - Requests would hang indefinitely when initiating an intercept before you - had a local process running. This has been fixed and will result in an - Empty reply from server until you start a local process. - docs: reference/intercepts - - - type: bugfix - title: API keys no longer show as "no description" - body: >- - New API keys generated internally for communication with - Ambassador Cloud no longer show up as "no description" in - the Ambassador Cloud web UI. Existing API keys generated by - older versions of Telepresence will still show up this way. - image: telepresence-2.3.7-keydesc.png - - - type: bugfix - title: Fix corruption of user-info.json - body: >- - Fixed a race condition that logging in and logging out - rapidly could cause memory corruption or corruption of the - user-info.json cache file used when - authenticating with Ambassador Cloud. - - - type: bugfix - title: Improved DNS resolver for systemd-resolved - body: Telepresence's systemd-resolved-based DNS resolver is now more - stable and in case it fails to initialize, the overriding resolver - will no longer cause general DNS lookup failures when telepresence defaults to - using it. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Faster telepresence list command - body: The performance of telepresence list has been increased - significantly by reducing the number of calls the command makes to the cluster. - docs: reference/client - - - version: 2.3.6 - date: '2021-07-20' - notes: - - - type: bugfix - title: Fix preview URLs - body: >- - Fixed a regression introduced in 2.3.5 that caused preview - URLs to not work. - - - type: bugfix - title: Fix subnet discovery - body: >- - Fixed a regression introduced in 2.3.5 where the Traffic - Manager's RoleBinding did not correctly appoint - the traffic-manager Role, causing - subnet discovery to not be able to work correctly. - docs: reference/rbac/ - - - type: bugfix - title: Fix root-user configuration loading - body: >- - Fixed a regression introduced in 2.3.5 where the root daemon - did not correctly read the configuration file; ignoring the - user's configured log levels and timeouts. 
- docs: reference/config/ - - - type: bugfix - title: Fix a user daemon crash - body: >- - Fixed an issue that could cause the user daemon to crash - during shutdown, as during shutdown it unconditionally - attempted to close a channel even though the channel might - already be closed. - - - version: 2.3.5 - date: '2021-07-15' - notes: - - type: feature - title: traffic-manager in multiple namespaces - body: >- - We now support installing multiple traffic managers in the same cluster. - This will allow operators to install deployments of telepresence that are - limited to certain namespaces. - image: ./telepresence-2.3.5-traffic-manager-namespaces.png - docs: install/helm - - type: feature - title: No more dependence on kubectl - body: >- - Telepresence no longer depends on having an external - kubectl binary, which might not be present for - OpenShift users (who have oc instead of - kubectl). - - type: feature - title: Agent image now configurable - body: >- - We now support configuring which agent image + registry to use in the - config. This enables users whose laptop is an air-gapped environment to - create selective intercepts without requiring a login. It also makes it easier - for those who are developing on Telepresence to specify which agent image should - be used. Env vars TELEPRESENCE_AGENT_IMAGE and TELEPRESENCE_REGISTRY are no longer - used. - image: ./telepresence-2.3.5-agent-config.png - docs: reference/config/#images - - type: feature - title: Max gRPC receive size now configurable - body: >- - The default max size of messages received through gRPC (4 MB) is sometimes insufficient. It can now be configured. - image: ./telepresence-2.3.5-grpc-max-receive-size.png - docs: reference/config/#grpc - - type: feature - title: CLI can be used in air-gapped environments - body: >- - While Telepresence will auto-detect if your cluster is in an air-gapped environment, - we've added an option users can add to their config.yml to ensure the cli acts like it - is in an air-gapped environment. Air-gapped environments require a manually installed - licence. - docs: reference/cluster-config/#air-gapped-cluster - image: ./telepresence-2.3.5-skipLogin.png - - version: 2.3.4 - date: '2021-07-09' - notes: - - type: bugfix - title: Improved IP log statements - body: >- - Some log statements were printing incorrect characters, when they should have been IP addresses. - This has been resolved to include more accurate and useful logging. - docs: reference/config/#log-levels - image: ./telepresence-2.3.4-ip-error.png - - type: bugfix - title: Improved messaging when multiple services match a workload - body: >- - If multiple services matched a workload when performing an intercept, Telepresence would crash. - It now gives the correct error message, instructing the user on how to specify which - service the intercept should use. - image: ./telepresence-2.3.4-improved-error.png - docs: reference/intercepts - - type: bugfix - title: Traffic-manger creates services in its own namespace to determine subnet - body: >- - Telepresence will now determine the service subnet by creating a dummy-service in its own - namespace, instead of the default namespace, which was causing RBAC permissions issues in - some clusters. - docs: reference/routing/#subnets - - type: bugfix - title: Telepresence connect respects pre-existing clusterrole - body: >- - When Telepresence connects, if the traffic-manager's desired clusterrole already exists in the - cluster, Telepresence will no longer try to update the clusterrole. 
- docs: reference/rbac - - type: bugfix - title: Helm Chart fixed for clientRbac.namespaced - body: >- - The Telepresence Helm chart no longer fails when installing with --set clientRbac.namespaced=true. - docs: install/helm - - version: 2.3.3 - date: '2021-07-07' - notes: - - type: feature - title: Traffic Manager Helm Chart - body: >- - Telepresence now supports installing the Traffic Manager via Helm. - This will make it easy for operators to install and configure the - server-side components of Telepresence separately from the CLI (which - in turn allows for better separation of permissions). - image: ./telepresence-2.3.3-helm.png - docs: install/helm/ - - type: feature - title: Traffic-manager in custom namespace - body: >- - As the traffic-manager can now be installed in any - namespace via Helm, Telepresence can now be configured to look for the - Traffic Manager in a namespace other than ambassador. - This can be configured on a per-cluster basis. - image: ./telepresence-2.3.3-namespace-config.png - docs: reference/config - - type: feature - title: Intercept --to-pod - body: >- - telepresence intercept now supports a - --to-pod flag that can be used to port-forward sidecars' - ports from an intercepted pod. - image: ./telepresence-2.3.3-to-pod.png - docs: reference/intercepts - - type: change - title: Change in migration from edgectl - body: >- - Telepresence no longer automatically shuts down the old - api_version=1 edgectl daemon. If migrating - from such an old version of edgectl you must now manually - shut down the edgectl daemon before running Telepresence. - This was already the case when migrating from the newer - api_version=2 edgectl. - - type: bugfix - title: Fixed error during shutdown - body: >- - The root daemon no longer terminates when the user daemon disconnects - from its gRPC streams, and instead waits to be terminated by the CLI. - This could cause problems with things not being cleaned up correctly. - - type: bugfix - title: Intercepts will survive deletion of intercepted pod - body: >- - An intercept will survive deletion of the intercepted pod provided - that another pod is created (or already exists) that can take over. - - version: 2.3.2 - date: '2021-06-18' - notes: - # Headliners - - type: feature - title: Service Port Annotation - body: >- - The mutator webhook for injecting traffic-agents now - recognizes a - telepresence.getambassador.io/inject-service-port - annotation to specify which port to intercept; bringing the - functionality of the --port flag to users who - use the mutator webook in order to control Telepresence via - GitOps. - image: ./telepresence-2.3.2-svcport-annotation.png - docs: reference/cluster-config#service-port-annotation - - type: feature - title: Outbound Connections - body: >- - Outbound connections are now routed through the intercepted - Pods which means that the connections originate from that - Pod from the cluster's perspective. This allows service - meshes to correctly identify the traffic. - docs: reference/routing/#outbound - - type: change - title: Inbound Connections - body: >- - Inbound connections from an intercepted agent are now - tunneled to the manager over the existing gRPC connection, - instead of establishing a new connection to the manager for - each inbound connection. This avoids interference from - certain service mesh configurations. 
- docs: reference/routing/#inbound - - # RBAC changes - - type: change - title: Traffic Manager needs new RBAC permissions - body: >- - The Traffic Manager requires RBAC - permissions to list Nodes, Pods, and to create a dummy - Service in the manager's namespace. - docs: reference/routing/#subnets - - type: change - title: Reduced developer RBAC requirements - body: >- - The on-laptop client no longer requires RBAC permissions to list the Nodes - in the cluster or to create Services, as that functionality - has been moved to the Traffic Manager. - - # Bugfixes - - type: bugfix - title: Able to detect subnets - body: >- - Telepresence will now detect the Pod CIDR ranges even if - they are not listed in the Nodes. - image: ./telepresence-2.3.2-subnets.png - docs: reference/routing/#subnets - - type: bugfix - title: Dynamic IP ranges - body: >- - The list of cluster subnets that the virtual network - interface will route is now configured dynamically and will - follow changes in the cluster. - - type: bugfix - title: No duplicate subnets - body: >- - Subnets fully covered by other subnets are now pruned - internally and thus never superfluously added to the - laptop's routing table. - docs: reference/routing/#subnets - - type: change # not a bugfix, but it only makes sense to mention after the above bugfixes - title: Change in default timeout - body: >- - The trafficManagerAPI timeout default has - changed from 5 seconds to 15 seconds, in order to facilitate - the extended time it takes for the traffic-manager to do its - initial discovery of cluster info as a result of the above - bugfixes. - - type: bugfix - title: Removal of DNS config files on macOS - body: >- - On macOS, files generated under - /etc/resolver/ as the result of using - include-suffixes in the cluster config are now - properly removed on quit. - docs: reference/routing/#mac-os-resolver - - - type: bugfix - title: Large file transfers - body: >- - Telepresence no longer erroneously terminates connections - early when sending a large HTTP response from an intercepted - service. - - type: bugfix - title: Race condition in shutdown - body: >- - When shutting down the user-daemon or root-daemon on the - laptop, telepresence quit and related commands - no longer return early before everything is fully shut down. - Now it can be counted on that by the time the command has - returned that all of the side-effects on the laptop have - been cleaned up. - - version: 2.3.1 - date: '2021-06-14' - notes: - - title: DNS Resolver Configuration - body: "Telepresence now supports per-cluster configuration for custom dns behavior, which will enable users to determine which local + remote resolver to use and which suffixes should be ignored + included. These can be configured on a per-cluster basis." - image: ./telepresence-2.3.1-dns.png - docs: reference/config - type: feature - - title: AlsoProxy Configuration - body: "Telepresence now supports also proxying user-specified subnets so that they can access external services only accessible to the cluster while connected to Telepresence. These can be configured on a per-cluster basis and each subnet is added to the TUN device so that requests are routed to the cluster for IPs that fall within that subnet." 
- image: ./telepresence-2.3.1-alsoProxy.png - docs: reference/config - type: feature - - title: Mutating Webhook for Injecting Traffic Agents - body: "The Traffic Manager now contains a mutating webhook to automatically add an agent to pods that have the telepresence.getambassador.io/traffic-agent: enabled annotation. This enables Telepresence to work well with GitOps CD platforms that rely on higher level kubernetes objects matching what is stored in git. For workloads without the annotation, Telepresence will add the agent the way it has in the past" - image: ./telepresence-2.3.1-inject.png - docs: reference/rbac - type: feature - - title: Traffic Manager Connect Timeout - body: "The trafficManagerConnect timeout default has changed from 20 seconds to 60 seconds, in order to facilitate the extended time it takes to apply everything needed for the mutator webhook." - image: ./telepresence-2.3.1-trafficmanagerconnect.png - docs: reference/config - type: change - - title: Fix for large file transfers - body: "Fix a tun-device bug where sometimes large transfers from services on the cluster would hang indefinitely" - image: ./telepresence-2.3.1-large-file-transfer.png - docs: reference/tun-device - type: bugfix - - title: Brew Formula Changed - body: "Now that the Telepresence rewrite is the main version of Telepresence, you can install it via Brew like so: brew install datawire/blackbird/telepresence." - image: ./telepresence-2.3.1-brew.png - docs: install/ - type: change - - version: 2.3.0 - date: '2021-06-01' - notes: - - title: Brew install Telepresence - body: "Telepresence can now be installed via brew on macOS, which makes it easier for users to stay up-to-date with the latest telepresence version. To install via brew, you can use the following command: brew install datawire/blackbird/telepresence2." - image: ./telepresence-2.3.0-homebrew.png - docs: install/ - type: feature - - title: TCP and UDP routing via Virtual Network Interface - body: "Telepresence will now perform routing of outbound TCP and UDP traffic via a Virtual Network Interface (VIF). The VIF is a layer 3 TUN-device that exists while Telepresence is connected. It makes the subnets in the cluster available to the workstation and will also route DNS requests to the cluster and forward them to intercepted pods. This means that pods with custom DNS configuration will work as expected. Prior versions of Telepresence would use firewall rules and were only capable of routing TCP." - image: ./tunnel.jpg - docs: reference/tun-device - type: feature - - title: SSH is no longer used - body: "All traffic between the client and the cluster is now tunneled via the traffic manager gRPC API. This means that Telepresence no longer uses ssh tunnels and that the manager no longer have an sshd installed. Volume mounts are still established using sshfs but it is now configured to communicate using the sftp-protocol directly, which means that the traffic agent also runs without sshd. A desired side effect of this is that the manager and agent containers no longer need a special user configuration." - image: ./no-ssh.png - docs: reference/tun-device/#no-ssh-required - type: change - - title: Running in a Docker container - body: "Telepresence can now be run inside a Docker container. This can be useful for avoiding side effects on a workstation's network, establishing multiple sessions with the traffic manager, or working with different clusters simultaneously." 
- image: ./run-tp-in-docker.png - docs: reference/inside-container - type: feature - - title: Configurable Log Levels - body: "Telepresence now supports configuring the log level for Root Daemon and User Daemon logs. This provides control over the nature and volume of information that Telepresence generates in daemon.log and connector.log." - image: ./telepresence-2.3.0-loglevels.png - docs: reference/config/#log-levels - type: feature - - version: 2.2.2 - date: '2021-05-17' - notes: - - title: Legacy Telepresence subcommands - body: Telepresence is now able to translate common legacy Telepresence commands into native Telepresence commands. So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the new Telepresence binary. - image: ./telepresence-2.2.png - docs: install/migrate-from-legacy/ - type: feature diff --git a/docs/v2.3/troubleshooting/index.md b/docs/v2.3/troubleshooting/index.md deleted file mode 100644 index 730cd866..00000000 --- a/docs/v2.3/troubleshooting/index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an intercept did not generate a preview URL - -Preview URLs can only be created if Telepresence is [logged in to -Ambassador Cloud](../reference/client/login/). When not logged in, it -will not even try to create a preview URL (additionally, by default it -will intercept all traffic rather than just a subset of the traffic). -Remove the intercept with `telepresence leave [deployment name]`, run -`telepresence login` to login to Ambassador Cloud, then recreate the -intercept. See the [intercepts how-to doc](../howtos/intercepts) for -more details. - -## Error on accessing preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on accessing preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Your GitHub organization isn't listed - -Ambassador Cloud needs access granted to your GitHub organization as a -third-party OAuth app. If an organization isn't listed during login -then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → -**Settings** → **Applications** → **Authorized OAuth Apps** → -**Ambassador Labs**. An organization owner will have a **Grant** -button, anyone not an owner will have **Request** which sends an email -to the owner. If an access request has been denied in the past the -user will not see the **Request** button, they will have to reach out -to the owner. - -Once access is granted, log out of Ambassador Cloud and log back in; -you should see the GitHub organization listed. 
- -The organization owner can go to the **GitHub menu** → **Your -organizations** → **[org name]** → **Settings** → **Third-party -access** to see if Ambassador Labs has access already or authorize a -request for access (only owners will see **Settings** on the -organization page). Clicking the pencil icon will show the -permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or requesting access on initial login - -When using GitHub as your identity provider, the first time you log in -to Ambassador Cloud GitHub will ask to authorize Ambassador Labs to -access your organizations and certain user data. - - - -Any listed organization with a green check has already granted access -to Ambassador Labs (you still need to authorize to allow Ambassador -Labs to read your user data and organization membership). - -Any organization with a red "X" requires access to be granted to -Ambassador Labs. Owners of the organization will see a **Grant** -button. Anyone who is not an owner will see a **Request** button. -This will send an email to the organization owner requesting approval -to access the organization. If an access request has been denied in -the past the user will not see the **Request** button, they will have -to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud -then back in to select the organization. - -### Volume mounts are not working on macOS - -It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. Lately there's been some issues using `brew install sshfs` a macOS workstation because the required component `osxfuse` (now named `macfuse`) isn't open source and hence, no longer supported. As a workaround, you can now use `gromgit/fuse/sshfs-mac` instead. Follow these steps: - -1. Remove old sshfs, macfuse, osxfuse using `brew uninstall` -2. `brew install --cask macfuse` -3. `brew install gromgit/fuse/sshfs-mac` -4. `brew link --overwrite sshfs-mac` - -Now sshfs -V shows you the correct version, e.g.: -``` -$ sshfs -V -SSHFS version 2.10 -FUSE library version: 2.9.9 -fuse: no mount point -``` - -but one more thing must be done before it works OK: -5. Try a mount (or an intercept that performs a mount). It will fail because you need to give permission to “Benjamin Fleischer” to execute a kernel extension (a pop-up appears that takes you to the system preferences). -6. Approve the needed permission -7. Reboot your computer. diff --git a/docs/v2.3/tutorial.md b/docs/v2.3/tutorial.md deleted file mode 100644 index 85058ca5..00000000 --- a/docs/v2.3/tutorial.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. 
Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for -your OS to install it and log in to Ambassador Cloud in your browser. -Follow the prompts to log in with GitHub then select your -organization. You will be redirected to the Ambassador Cloud -dashboard; later you will manage your preview URLs here. - -### macOS - -```shell -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -### Linux - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. - - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip>. Be sure you use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
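    A quick, optional way to confirm the app answers over plain HTTP is to reuse the load-balancer lookup from the previous step; this is only an illustrative check and not part of the original guide:

    ```shell
    # Illustrative check that the Edgy Corp web app is reachable over plain HTTP.
    LB_IP=$(kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    curl -i "http://${LB_IP}/"   # expect a 200 response with the landing-page HTML
    ```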
- -## Developing with Telepresence - -Now that your app is all wired up you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change on the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but more complex apps requiring many more dependencies is not feasible to manage running on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services on code running locally without needing to run dependent services or redeploy code updates to your cluster on every change. It also will generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and - logging in to Ambassador Cloud. The Cloud dashboard is used to - manage your intercepts and share them with colleagues. You must be - logged in to create selective intercepts as we are going to do - here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`. 
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom of the page informing you that you are viewing a preview URL, along with your name and organization name.
- - - - - - -
  1. Switch back to the dashboard page in your browser and refresh it to see your preview URL listed. Click the box to expand the options, where you can disable authentication or remove the preview.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/v2.3/versions.yml b/docs/v2.3/versions.yml deleted file mode 100644 index c26bd3e5..00000000 --- a/docs/v2.3/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.3.7" -dlVersion: "2.3.7" -docsVersion: "2.3" -branch: release/v2 -productName: "Telepresence" diff --git a/docs/v2.4/community.md b/docs/v2.4/community.md deleted file mode 100644 index 922457c9..00000000 --- a/docs/v2.4/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/v2.4/concepts/context-prop.md b/docs/v2.4/concepts/context-prop.md deleted file mode 100644 index dc9ee18f..00000000 --- a/docs/v2.4/concepts/context-prop.md +++ /dev/null @@ -1,36 +0,0 @@ -# Context propagation - -**Context propagation** is the transfer of request metadata across the services and remote processes of a distributed system. Telepresence uses context propagation to intelligently route requests to the appropriate destination. - -This metadata is the context that is transferred across system services. It commonly takes the form of HTTP headers; context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -Metadata propagation refers to any service or other middleware not stripping away the headers. Propagation facilitates the movement of the injected contexts between other downstream services and processes. - - -## What is distributed tracing? 
- -Distributed tracing is a technique for troubleshooting and profiling distributed microservices applications and is a common application for context propagation. It is becoming a key component for debugging. - -In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) (or another header format, such as [B3 headers](https://github.com/openzipkin/b3-propagation)), which facilitates maintaining the headers through every service without being stripped (the propagation). - - -## What are intercepts and preview URLs? - -[Intercepts](../../reference/intercepts) and [preview -URLs](../../howtos/preview-urls/) are functions of Telepresence that -enable easy local development from a remote Kubernetes cluster and -offer a preview environment for sharing and real-time collaboration. - -Telepresence uses custom HTTP headers and header propagation to -identify which traffic to intercept both for plain personal intercepts -and for personal intercepts with preview URLs; these techniques are -more commonly used for distributed tracing, so what they are being -used for is a little unorthodox, but the mechanisms for their use are -already widely deployed because of the prevalence of tracing. The -headers facilitate the smart routing of requests either to live -services in the cluster or services running locally on a developer’s -machine. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to [Ambassador Cloud](https://app.getambassador.io) with other information about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. diff --git a/docs/v2.4/concepts/devloop.md b/docs/v2.4/concepts/devloop.md deleted file mode 100644 index 8b1fbf35..00000000 --- a/docs/v2.4/concepts/devloop.md +++ /dev/null @@ -1,50 +0,0 @@ -# The developer experience and the inner dev loop - -## How is the developer experience changing? - -The developer experience is the workflow a developer uses to develop, test, deploy, and release software. - -Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. - -The outer dev loop is _everything else_ that happens leading up to release. This includes code merge, automated code review, test execution, deployment, [controlled (canary) release](https://www.getambassador.io/docs/argo/latest/concepts/canary/), and observation of results. 
The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a [GitOps workflow](https://www.getambassador.io/docs/argo/latest/concepts/gitops/#what-is-gitops) and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. - -Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. - -Engineers now must design and build distributed service-based applications _and_ also assume responsibility for the full development life cycle. The new developer experience means that developers can no longer rely on monolithic application developer best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner development loop. Now developers have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this adds development time to the equation. - -## What is the inner dev loop? - -The inner dev loop is the single developer workflow. A single developer should be able to set up and use an inner dev loop to code and test changes quickly. - -Even within the Kubernetes space, developers will find much of the inner dev loop familiar. That is, code can still be written locally at a level that a developer controls and committed to version control. - -In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code — they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. - -![traditional inner dev loop](../../images/trad-inner-dev-loop.png) - -## In search of lost time: How does containerization change the inner dev loop? - -The inner dev loop is where writing and testing code happens, and time is critical for maximum developer productivity and getting features in front of end users. The faster the feedback loop, the faster developers can refactor and test again. - -Changes to the inner dev loop process, i.e., containerization, threaten to slow this development workflow down. Coding stays the same in the new inner dev loop, but code has to be containerized. The _containerized_ inner dev loop requires a number of new steps: - -* packaging code in containers -* writing a manifest to specify how Kubernetes should run the application (e.g., YAML-based configuration information, such as how much memory should be given to a container) -* pushing the container to the registry -* deploying containers in Kubernetes - -Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. 
This new container build step is a hidden tax, which is quite expensive. - - -![container inner dev loop](../../images/container-inner-dev-loop.png) - -## Tackling the slow inner dev loop - -A slow inner dev loop can negatively impact frontend and backend teams, delaying work on individual and team levels and slowing releases into production overall. - -For example: - -* Frontend developers have to wait for previews of backend changes on a shared dev/staging environment (for example, until CI/CD deploys a new version) and/or rely on mocks/stubs/virtual services when coding their application locally. These changes are only verifiable by going through the CI/CD process to build and deploy within a target environment. -* Backend developers have to wait for CI/CD to build and deploy their app to a target environment to verify that their code works correctly with cluster or cloud-based dependencies as well as to share their work to get feedback. - -New technologies and tools can facilitate cloud-native, containerized development. And in the case of a sluggish inner dev loop, developers can accelerate productivity with tools that help speed the loop up again. diff --git a/docs/v2.4/concepts/devworkflow.md b/docs/v2.4/concepts/devworkflow.md deleted file mode 100644 index fa24fc2b..00000000 --- a/docs/v2.4/concepts/devworkflow.md +++ /dev/null @@ -1,7 +0,0 @@ -# The changing development workflow - -A changing workflow is one of the main challenges for developers adopting Kubernetes. Software development itself isn’t the challenge. Developers can continue to [code using the languages and tools with which they are most productive and comfortable](https://www.getambassador.io/resources/kubernetes-local-dev-toolkit/). That’s the beauty of containerized development. - -However, the cloud-native, Kubernetes-based approach to development means adopting a new development workflow and development environment. Beyond the basics, such as figuring out how to containerize software, [how to run containers in Kubernetes](https://www.getambassador.io/docs/kubernetes/latest/concepts/appdev/), and how to deploy changes into containers, for example, Kubernetes adds complexity before it delivers efficiency. The promise of a “quicker way to develop software” applies at least within the traditional aspects of the inner dev loop, where the single developer codes, builds and tests their software. But both within the inner dev loop and once code is pushed into version control to trigger the outer dev loop, the developer experience changes considerably from what many developers are used to. - -In this new paradigm, new steps are added to the inner dev loop, and more broadly, the developer begins to share responsibility for the full life cycle of their software. Inevitably this means taking new workflows and tools on board to ensure that the full life cycle continues full speed ahead. diff --git a/docs/v2.4/concepts/faster.md b/docs/v2.4/concepts/faster.md deleted file mode 100644 index b649e415..00000000 --- a/docs/v2.4/concepts/faster.md +++ /dev/null @@ -1,25 +0,0 @@ -# Making the remote local: Faster feedback, collaboration and debugging - -With the goal of achieving [fast, efficient development](https://www.getambassador.io/use-case/local-kubernetes-development/), developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. - -## How should I set up a Kubernetes development environment? 
- -[Setting up a development environment](https://www.getambassador.io/resources/development-environments-microservices/) for Kubernetes can be much more complex than the set up for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. - -While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. - -A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../../quick-start/) to facilitate fast feedback, debugging and collaboration. - -## What is Telepresence? - -Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../../quick-start/). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. - -## How can I get fast, efficient local development? - -The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. - -A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code, local debugging in production, and [preview URL](../../howtos/preview-urls/) functionality to share dev environments with others for multi-user collaboration. - -Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. - -The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts and preview URLs. 
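As a concrete illustration of the workflow described above, here is a minimal sketch of a Telepresence session; `my-service` and port `8080` are placeholders for your own workload and local port:

```shell
telepresence connect                            # proxy your laptop into the cluster
telepresence list                               # list workloads that can be intercepted
telepresence intercept my-service --port 8080   # reroute the workload's traffic to localhost:8080
# ...edit and test locally against live cluster dependencies...
telepresence leave my-service                   # stop the intercept
telepresence quit                               # disconnect the daemon
```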
diff --git a/docs/v2.4/concepts/intercepts.md b/docs/v2.4/concepts/intercepts.md deleted file mode 100644 index c39a80eb..00000000 --- a/docs/v2.4/concepts/intercepts.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: "Types of intercepts" -description: "Short demonstration of personal vs global intercepts" ---- - -import React from 'react'; - -import Alert from '@material-ui/lab/Alert'; -import AppBar from '@material-ui/core/AppBar'; -import InterceptAnimationSVG from '@src/assets/images/intercept-animation.inline.svg' -import Paper from '@material-ui/core/Paper'; -import Tab from '@material-ui/core/Tab'; -import TabContext from '@material-ui/lab/TabContext'; -import TabList from '@material-ui/lab/TabList'; -import TabPanel from '@material-ui/lab/TabPanel'; - -export function Animation(props) { - let el = React.useRef(null); - React.useEffect(() => { - const queueAnimation = () => { - setTimeout(() => { - el.current?.getAnimations({subtree: true})?.forEach((anim) => { - anim.finish(); - anim.play(); - }) - queueAnimation(); - }, 3000); - }; - queueAnimation(); - }, el); - return ( -
- -
- ); -}; - -export function TabsContainer({ children, ...props }) { - const [state, setState] = React.useState({curTab: "personal"}); - React.useEffect(() => { - const query = new URLSearchParams(window.location.search); - var interceptType = query.get('intercept') || "personal"; - if (state.curTab != interceptType) { - setState({curTab: interceptType}); - } - }, [state, setState]) - var setURL = function(newTab) { - history.replaceState(null,null, - `?intercept=${newTab}${window.location.hash}`, - ); - }; - return ( -
- - - {setState({curTab: newTab}); setURL(newTab)}} aria-label="intercept types"> - - - - - - {children} - -
- ); -}; - -# Types of intercepts - - - - -## No intercept - - - - -This is the normal operation of your cluster without Telepresence. - - - - - -## Global intercept - - - - -**Global intercepts** replace the Kubernetes "Orders" service with the -Orders service running on your laptop. The users see no change, but -with all the traffic coming to your laptop, you can observe and debug -with all your dev tools. - - - -### Creating and using global intercepts - - 1. Creating the intercept: Intercept your service from your CLI: - - ```shell - telepresence intercept SERVICENAME --http-match=all - ``` - - - - Make sure your current kubectl context points to the target - cluster. If your service is running in a different namespace than - your current active context, use or change the `--namespace` flag. - - - - 2. Using the intercept: Send requests to your service: - - All requests will be sent to the version of your service that is - running in the local development environment. - - - - -## Personal intercept - -**Personal intercepts** allow you to be selective and intercept only -some of the traffic to a service while not interfering with the rest -of the traffic. This allows you to share a cluster with others on your -team without interfering with their work. - - - - -In the illustration above, **Orange** -requests are being made by Developer 2 on their laptop and the -**green** are made by a teammate, -Developer 1, on a different laptop. - -Each developer can intercept the Orders service for their requests only, -while sharing the rest of the development environment. - - - -### Creating and using personal intercepts - - 1. Creating the intercept: Intercept your service from your CLI: - - ```shell - telepresence intercept SERVICENAME --http-match=Personal-Intercept=126a72c7-be8b-4329-af64-768e207a184b - ``` - - We're using - `Personal-Intercept=126a72c7-be8b-4329-af64-768e207a184b` as the - header for the sake of the example, but you can use any - `key=value` pair you want, or `--http-match=auto` to have it - choose something automatically. - - - - Make sure your current kubect context points to the target - cluster. If your service is running in a different namespace than - your current active context, use or change the `--namespace` flag. - - - - 2. Using the intercept: Send requests to your service by passing the - HTTP header: - - ```http - Personal-Intercept: 126a72c7-be8b-4329-af64-768e207a184b - ``` - - - - Need a browser extension to modify or remove an HTTP-request-headers? - - Chrome - {' '} - Firefox - - - - 3. Using the intercept: Send requests to your service without the - HTTP header: - - Requests without the header will be sent to the version of your - service that is running in the cluster. This enables you to share - the cluster with a team! 
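To make the difference concrete, here is a small sketch of exercising a personal intercept from the command line; the hostname is a placeholder for your own ingress or preview URL, and the header value matches the example above:

```shell
# Routed to the copy of the service running on your laptop (header matches the intercept).
curl -H 'Personal-Intercept: 126a72c7-be8b-4329-af64-768e207a184b' http://dev-environment.example.com/

# Routed to the version of the service running in the cluster (no intercept header).
curl http://dev-environment.example.com/
```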
- - - diff --git a/docs/v2.4/doc-links.yml b/docs/v2.4/doc-links.yml deleted file mode 100644 index bdadb764..00000000 --- a/docs/v2.4/doc-links.yml +++ /dev/null @@ -1,86 +0,0 @@ - - title: Quick start - link: quick-start - - title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Install Traffic Manager with Helm - link: install/helm/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ - - title: Create a local Go K8s dev environment - link: install/qs-go-advanced/ - - title: Create a local Java K8s dev environment - link: install/qs-java-advanced/ - - title: Core concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: 'Making the remote local: Faster feedback, collaboration and debugging' - link: concepts/faster - - title: Context propagation - link: concepts/context-prop - - title: Types of intercepts - link: concepts/intercepts - - title: How do I... - items: - - title: Intercept a service in your own environment - link: howtos/intercepts - - title: Share dev environments with preview URLs - link: howtos/preview-urls - - title: Proxy outbound traffic to my cluster - link: howtos/outbound - - title: Send requests to an intercepted service - link: howtos/request - - title: Technical reference - items: - - title: Architecture - link: reference/architecture - - title: Client reference - link: reference/client - items: - - title: login - link: reference/client/login - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Using Docker for intercepts - link: reference/docker-run - - title: Running Telepresence in a Docker container - link: reference/inside-container - - title: Environment variables - link: reference/environment - - title: Intercepts - link: reference/intercepts/ - items: - - title: Manually injecting the Traffic Agent - link: reference/intercepts/manual-agent - - title: Volume mounts - link: reference/volume - - title: RESTful API service - link: reference/restapi - - title: DNS resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: Telepresence and VPNs - link: reference/vpn - - title: Networking through Virtual Network Interface - link: reference/tun-device - - title: Connection Routing - link: reference/routing - - title: Using Telepresence with Linkerd - link: reference/linkerd - - title: FAQs - link: faqs - - title: Troubleshooting - link: troubleshooting - - title: Community - link: community - - title: Release Notes - link: release-notes diff --git a/docs/v2.4/faqs.md b/docs/v2.4/faqs.md deleted file mode 100644 index c98f2339..00000000 --- a/docs/v2.4/faqs.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. 
- -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access with additional developers or stakeholders to the application via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What operating systems does Telepresence work on?** - -Telepresence currently works natively on macOS (Intel and Apple silicon), Linux, and WSL 2. Starting with v2.4.0, we are also releasing a native Windows version of Telepresence that we are considering a Developer Preview. - -** What protocols can be intercepted by Telepresence?** - -All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](https://www.getambassador.io/feedback/) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. - -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. 
when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. - -Telepresence will discover/prompt during first use for this info and make its best guess at figuring this out and ask you to confirm or update this. - -** Why are my intercepts still reporting as active when they've been disconnected?** - - In certain cases, Telepresence might not have been able to communicate back with Ambassador Cloud to update the intercept's status. Worry not, they will get garbage collected after a period of time. - -** Why is my intercept associated with an "Unreported" cluster?** - - Intercepts tagged with "Unreported" clusters simply mean Ambassador Cloud was unable to associate a service instance with a known detailed service from an Edge Stack or API Gateway cluster. [Connecting your cluster to the Service Catalog](https://www.getambassador.io/docs/cloud/latest/service-catalog/quick-start/) will properly match your services from multiple data sources. - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the traffic-manager and traffic-agent images that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptable mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single `traffic-manager` service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted all pods associated with this workload will be restarted with the Traffic Agent automatically injected. - -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the `traffic-manager` service installed in the cluster and `traffic-agent` containers injected into each pod being intercepted. - -Running this command will also stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established by using -the `kubectl port-forward` machinery (though without actually spawning -a separate program) to establish a TCP connection to Telepresence -Traffic Manager in the cluster, and running Telepresence's custom VPN -protocol over that TCP connection. 
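For intuition only, that connection is conceptually similar to the following `kubectl port-forward` invocation; Telepresence drives the port-forward machinery directly rather than spawning `kubectl`, and the port number below is an assumption for illustration, not something you normally run yourself:

```shell
# Conceptual illustration only; assumes a default install in the "ambassador"
# namespace and assumes the Traffic Manager API listens on port 8081.
kubectl port-forward -n ambassador svc/traffic-manager 8081:8081
```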
- - - -** What identity providers are supported for authenticating to view a preview URL?** - -* GitHub -* GitLab -* Google - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](https://www.getambassador.io/feedback/) which providers are the most important to you and your team in order for us to prioritize those. - -** Is Telepresence open source?** - -Yes it is! You can find its source code on [GitHub](https://github.com/telepresenceio/telepresence). - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](https://www.getambassador.io/feedback/), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts. diff --git a/docs/v2.4/howtos/outbound.md b/docs/v2.4/howtos/outbound.md deleted file mode 100644 index e148023e..00000000 --- a/docs/v2.4/howtos/outbound.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Proxy outbound traffic to my cluster - -While preview URLs are a powerful feature, Telepresence offers other options for proxying traffic between your laptop and the cluster. This section discribes how to proxy outbound traffic and control outbound connectivity to your cluster. - - This guide assumes that you have the quick start sample web app running in your cluster to test accessing the web-app service. You can substitute this service for any other service you are running. - -## Proxying outbound traffic - -Connecting to the cluster instead of running an intercept allows you to access cluster workloads as if your laptop was another pod in the cluster. This enables you to access other Kubernetes services using `.`. A service running on your laptop can interact with other services on the cluster by name. - -When you connect to your cluster, the background daemon on your machine runs and installs the [Traffic Manager deployment](../../reference/architecture/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect` and enter your password to run the daemon. - - ``` - $ telepresence connect - Launching Telepresence Daemon v2.3.7 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground /home//.cache/telepresence/logs '' ''" - [sudo] password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -2. Run `telepresence status` to confirm connection to your cluster and that it is proxying traffic. - - ``` - $ telepresence status - Root Daemon: Running - Version : v2.3.7 (api 3) - Primary DNS : "" - Fallback DNS: "" - User Daemon: Running - Version : v2.3.7 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 0 total - ``` - -3. Access your service by name with `curl web-app.emojivoto:80`. Telepresence routes the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl web-app.emojivoto:80 - - - - - Emoji Vote - ... 
- ``` - -If you terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -When using Telepresence in this way, you need to access services with the namespace qualified DNS name (<service name>.<namespace>) before you start an intercept. After you start an intercept, only <service name> is required. Read more about these differences in the DNS resolution reference guide. - -## Controlling outbound connectivity - -By default, Telepresence provides access to all Services found in all namespaces in the connected cluster. This can lead to problems if the user does not have RBAC access permissions to all namespaces. You can use the `--mapped-namespaces ` flag to control which namespaces are accessible. - -When you use the `--mapped-namespaces` flag, you need to include all namespaces containing services you want to access, as well as all namespaces that contain services related to the intercept. - -### Using local-only intercepts - -When you develop on isolated apps or on a virtualized container, you don't need an outbound connection. However, when developing services that aren't deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service will be deployed. This is because services that aren't exposed through ingress controllers require connectivity to those services. When you provide outbound connectivity, the service can access other services in that namespace without using qualified names. A local-only intercept does not cause outbound connections to originate from the intercepted namespace. The reason for this is to establish correct origin; the connection must be routed to a `traffic-agent`of an intercepted pod. For local-only intercepts, the outbound connections originates from the `traffic-manager`. - -To control outbound connectivity to specific namespaces, add the `--local-only` flag: - - ``` - $ telepresence intercept --namespace --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. -You can deactivate the intercept with `telepresence leave `. This removes unqualified name access. - -### Proxy outcound connectivity for laptops - -To specify additional hosts or subnets that should be resolved inside of the cluster, see [AlsoProxy](../../reference/config/#alsoproxy) for more details. \ No newline at end of file diff --git a/docs/v2.4/howtos/preview-urls.md b/docs/v2.4/howtos/preview-urls.md deleted file mode 100644 index cad231fc..00000000 --- a/docs/v2.4/howtos/preview-urls.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Share development environments with preview URLs - -Telepresence can generate sharable preview URLs. This enables you to work on a copy of your service locally, and share that environment with a teammate for pair programming. While using preview URLs, Telepresence will route only the requests coming from that preview URL to your local environment. Requests to the ingress are routed to your cluster as usual. - -Preview URLs are protected behind authentication through Ambassador Cloud, and, access to the URL is only available to users in your organization. 
You can make the URL publicly accessible for sharing with outside collaborators. - -## Creating a preview URL - -1. Connect to Telepresence and enter the `telepresence list` command in your CLI to verify the service is listed. -Telepresence only supports Deployments, ReplicaSets, and StatefulSet workloads with a label that matches a Service. - -2. Enter `telepresence login` to launch Ambassador Cloud in your browser. - -   If you are in an environment where Telepresence cannot launch a local browser, pass the [`--apikey` flag to `telepresence login`](../../reference/client/login/). - -3. Start the intercept with `telepresence intercept --port --env-file ` and adjust the flags as follows: -   * **port:** specify the port the local instance of your service is running on. If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon. -   * **env-file:** specify a file path for Telepresence to write the environment variables that are set in the pod. - -4. Answer the question prompts. -   * **What's your ingress' IP address?**: the IP address or DNS name of your ingress controller (this is usually a "service.namespace" DNS name). -   * **What's your ingress' TCP port number?**: the port your ingress controller is listening to. This is often 443 for TLS ports, and 80 for non-TLS ports. -   * **Does that TCP port on your ingress use TLS (as opposed to cleartext)?**: whether the ingress controller is expecting TLS communication on the specified port. -   * **If required by your ingress, specify a different hostname (TLS-SNI, HTTP "Host" header) to be used in requests.**: if your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), enter that value here. - -   The example below shows a preview URL for `example-service`, which listens on port 8080. The preview URL's ingress will use the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and the hostname `dev-environment.edgestack.me`: - -   ```console -$ telepresence intercept example-service --port 8080 --env-file ~/ex-svc.env - - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Confirm the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: example-service.default]: ambassador.ambassador - - 2/4: What's your ingress' TCP port number? - - [default: -]: 443 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: y - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: ambassador.ambassador]: dev-environment.edgestack.me - - Using Deployment example-service - intercepted - Intercept name : example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Service Port Identifier: http - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":example-service") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname : dev-environment.edgestack.me - - ``` - -5. Start your local environment using the environment variables retrieved in the previous step.
- - Here are some examples of how to pass the environment variables to your local process: - * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file). - * **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. - * **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Go to the Preview URL generated from the intercept. -Traffic is now intercepted from your preview URL without impacting other traffic from your Ingress. - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -7. Make a request on the URL you would usually query for that environment. Don't route a request to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) routes to services in the cluster like normal. - -8. Share with a teammate. - - You can collaborate with teammates by sending your preview URL to them. Once your teammate logs in, they must select the same identity provider and org as you are using. This authorizes their access to the preview URL. When they visit the preview URL, they see the intercepted service running on your laptop. - You can now collaborate with a teammate to debug the service on the shared intercept URL without impacting the production environment. - -## Sharing a preview URL with people outside your team - -To collaborate with someone outside of your identity provider's organization: -Log into [Ambassador Cloud](https://app.getambassador.io/cloud/). - navigate to your service's intercepts, select the preview URL details, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the preview URL either from the dashboard or by running `telepresence preview remove ` also removes all access to the preview URL. - -## Change access restrictions - -To collaborate with someone outside of your identity provider's organization, you must make your preview URL publicly accessible. - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/). -2. Select the service you want to share and open the service details page. -3. Click the **Intercepts** tab and expand the preview URL details. -4. Click **Make Publicly Accessible**. - -Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on a local environment. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. - -## Remove a preview URL from an Intercept - -To delete a preview URL and remove all access to the intercepted service, - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) -2. Click on the service you want to share and open the service details page. -3. Click the **Intercepts** tab and expand the preview URL details. -4. 
Click **Remove Preview**. - -Alternatively, you can remove a preview URL with the following command: -`telepresence preview remove ` diff --git a/docs/v2.4/howtos/request.md b/docs/v2.4/howtos/request.md deleted file mode 100644 index 1109c68d..00000000 --- a/docs/v2.4/howtos/request.md +++ /dev/null @@ -1,12 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Send requests to an intercepted service - -Ambassador Cloud can inform you about the required request parameters to reach an intercepted service. - - 1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) - 2. Navigate to the desired service Intercepts page - 3. Click the **Query** button to open the pop-up menu. - 4. Toggle between **CURL**, **Headers** and **Browse**. - -The pre-built queries and header information will help you get started to query the desired intercepted service and manage header propagation. diff --git a/docs/v2.4/images/container-inner-dev-loop.png b/docs/v2.4/images/container-inner-dev-loop.png deleted file mode 100644 index 06586cd6..00000000 Binary files a/docs/v2.4/images/container-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.4/images/github-login.png b/docs/v2.4/images/github-login.png deleted file mode 100644 index cfd4d4bf..00000000 Binary files a/docs/v2.4/images/github-login.png and /dev/null differ diff --git a/docs/v2.4/images/logo.png b/docs/v2.4/images/logo.png deleted file mode 100644 index 701f63ba..00000000 Binary files a/docs/v2.4/images/logo.png and /dev/null differ diff --git a/docs/v2.4/images/split-tunnel.png b/docs/v2.4/images/split-tunnel.png deleted file mode 100644 index 5bf30378..00000000 Binary files a/docs/v2.4/images/split-tunnel.png and /dev/null differ diff --git a/docs/v2.4/images/trad-inner-dev-loop.png b/docs/v2.4/images/trad-inner-dev-loop.png deleted file mode 100644 index 618b674f..00000000 Binary files a/docs/v2.4/images/trad-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.4/images/tunnelblick.png b/docs/v2.4/images/tunnelblick.png deleted file mode 100644 index 8944d445..00000000 Binary files a/docs/v2.4/images/tunnelblick.png and /dev/null differ diff --git a/docs/v2.4/images/vpn-dns.png b/docs/v2.4/images/vpn-dns.png deleted file mode 100644 index eed535c4..00000000 Binary files a/docs/v2.4/images/vpn-dns.png and /dev/null differ diff --git a/docs/v2.4/install/helm.md b/docs/v2.4/install/helm.md deleted file mode 100644 index 688d2f20..00000000 --- a/docs/v2.4/install/helm.md +++ /dev/null @@ -1,181 +0,0 @@ -# Install with Helm - -[Helm](https://helm.sh) is a package manager for Kubernetes that automates the release and management of software on Kubernetes. The Telepresence Traffic Manager can be installed via a Helm chart with a few simple steps. - -**Note** that installing the Traffic Manager through Helm will prevent `telepresence connect` from ever upgrading it. If you wish to upgrade a Traffic Manager that was installed via the Helm chart, please see the steps [below](#upgrading-the-traffic-manager) - -For more details on what the Helm chart installs and what can be configured, see the Helm chart [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). - -## Before you begin - -The Telepresence Helm chart is hosted by Ambassador Labs and published at `https://app.getambassador.io`. 
- -Start by adding this repo to your Helm client with the following command: - -```shell -helm repo add datawire https://app.getambassador.io -helm repo update -``` - -## Install with Helm - -When you run the Helm chart, it installs all the components required for the Telepresence Traffic Manager. - -1. If you are installing the Telepresence Traffic Manager **for the first time on your cluster**, create the `ambassador` namespace in your cluster: - - ```shell - kubectl create namespace ambassador - ``` - -2. Install the Telepresence Traffic Manager with the following command: - - ```shell - helm install traffic-manager --namespace ambassador datawire/telepresence - ``` - -### Install into custom namespace - -The Helm chart supports being installed into any namespace, not necessarily `ambassador`. Simply pass a different `namespace` argument to `helm install`. -For example, if you wanted to deploy the traffic manager to the `staging` namespace: - -```bash -helm install traffic-manager --namespace staging datawire/telepresence -``` - -Note that users of Telepresence will need to configure their kubeconfig to find this installation of the Traffic Manager: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` - -See [the kubeconfig documentation](../../reference/config#manager) for more information. - -### Upgrading the Traffic Manager. - -Versions of the Traffic Manager Helm chart are coupled to the versions of the Telepresence CLI that they are intended for. -Thus, for example, if you wish to use Telepresence `v2.4.0`, you'll need to install version `v2.4.0` of the Traffic Manager Helm chart. - -Upgrading the Traffic Manager is the same as upgrading any other Helm chart; for example, if you installed the release into the `ambassador` namespace, and you just wished to upgrade it to the latest version without changing any configuration values: - -```shell -helm repo up -helm upgrade traffic-manager datawire/telepresence --reuse-values --namespace ambassador -``` - -If you want to upgrade the Traffic-Manager to a specific version, add a `--version` flag with the version number to the upgrade command. For example: `--version v2.4.1` - -## RBAC - -### Installing a namespace-scoped traffic manager - -You might not want the Traffic Manager to have permissions across the entire kubernetes cluster, or you might want to be able to install multiple traffic managers per cluster (for example, to separate them by environment). -In these cases, the traffic manager supports being installed with a namespace scope, allowing cluster administrators to limit the reach of a traffic manager's permissions. - -For example, suppose you want a Traffic Manager that only works on namespaces `dev` and `staging`. -To do this, create a `values.yaml` like the following: - -```yaml -managerRbac: - create: true - namespaced: true - namespaces: - - dev - - staging -``` - -This can then be installed via: - -```bash -helm install traffic-manager --namespace staging datawire/telepresence -f ./values.yaml -``` - -**NOTE** Do not install namespace-scoped Traffic Managers and a global Traffic Manager in the same cluster, as it could have unexpected effects. - -#### Namespace collision detection - -The Telepresence Helm chart will try to prevent namespace-scoped Traffic Managers from managing the same namespaces. 
-It will do this by creating a ConfigMap, called `traffic-manager-claim`, in each namespace that a given install manages. - -So, for example, suppose you install one Traffic Manager to manage namespaces `dev` and `staging`, as: - -```bash -helm install traffic-manager --namespace dev datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={dev,staging}' -``` - -You might then attempt to install another Traffic Manager to manage namespaces `staging` and `prod`: - -```bash -helm install traffic-manager --namespace prod datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={staging,prod}' -``` - -This would fail with an error: - -``` -Error: rendered manifests contain a resource that already exists. Unable to continue with install: ConfigMap "traffic-manager-claim" in namespace "staging" exists and cannot be imported into the current release: invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-namespace" must equal "prod": current value is "dev" -``` - -To fix this error, fix the overlap either by removing `staging` from the first install, or from the second. - -#### Namespace scoped user permissions - -Optionally, you can also configure user rbac to be scoped to the same namespaces as the manager itself. -You might want to do this if you don't give your users permissions throughout the cluster, and want to make sure they only have the minimum set required to perform telepresence commands on certain namespaces. - -Continuing with the `dev` and `staging` example from the previous section, simply add the following to `values.yaml` (make sure you set the `subjects`!): - -```yaml -clientRbac: - create: true - - # These are the users or groups to which the user rbac will be bound. - # This MUST be set. - subjects: {} - # - kind: User - # name: jane - # apiGroup: rbac.authorization.k8s.io - - namespaced: true - - namespaces: - - dev - - staging -``` - -#### Namespace-scoped webhook - -If you wish to use the traffic-manager's [mutating webhook](../../reference/cluster-config#mutating-webhook) with a namespace-scoped traffic manager, you will have to ensure that each namespace has an `app.kubernetes.io/name` label that is identical to its name: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: staging - labels: - app.kubernetes.io/name: staging -``` - -You can also use `kubectl label` to add the label to an existing namespace, e.g.: - -```shell -kubectl label namespace staging app.kubernetes.io/name=staging -``` - -This is required because the mutating webhook will use the name label to find namespaces to operate on. - -**NOTE** This labelling happens automatically in kubernetes >= 1.21. - -### Installing RBAC only - -Telepresence Traffic Manager does require some [RBAC](../../reference/rbac/) for the traffic-manager deployment itself, as well as for users. -To make it easier for operators to introspect / manage RBAC separately, you can use `rbac.only=true` to -only create the rbac-related objects. -Additionally, you can use `clientRbac.create=true` and `managerRbac.create=true` to toggle which subset(s) of RBAC objects you wish to create. diff --git a/docs/v2.4/install/index.md b/docs/v2.4/install/index.md deleted file mode 100644 index 355ad2c5..00000000 --- a/docs/v2.4/install/index.md +++ /dev/null @@ -1,152 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install - -Install Telepresence by running the commands below for your OS. 
If you are not the administrator of your cluster, you will need [administrative RBAC permissions](../reference/rbac#administrating-telepresence) to install and use Telepresence in your cluster. - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## What's Next? - -Follow one of our [quick start guides](../quick-start/) to start using Telepresence, either with our sample app or in your own environment. - -## Installing nightly versions of Telepresence - -We build and publish the contents of the default branch, [release/v2](https://github.com/telepresenceio/telepresence), of Telepresence -nightly, Monday through Friday, for macOS (Intel and Apple silicon), Linux, and Windows. - -The tags are formatted like so: `vX.Y.Z-nightly-$gitShortHash`. - -`vX.Y.Z` is the most recent release of Telepresence with the patch version (Z) bumped one higher. -For example, if our last release was 2.3.4, nightly builds would start with v2.3.5, until a new -version of Telepresence is released. - -`$gitShortHash` will be the short hash of the git commit of the build. - -Use these URLs to download the most recent nightly build. 


```shell
# Intel Macs
https://app.getambassador.io/download/tel2/darwin/amd64/nightly/telepresence

# Apple silicon Macs
https://app.getambassador.io/download/tel2/darwin/arm64/nightly/telepresence
```


```
https://app.getambassador.io/download/tel2/linux/amd64/nightly/telepresence
```


```
https://app.getambassador.io/download/tel2/windows/amd64/nightly/telepresence.zip
```


## Installing older versions of Telepresence

Use these URLs to download an older version for your OS (including older nightly builds), replacing `x.y.z` with the versions you want.


```shell
# Intel Macs
https://app.getambassador.io/download/tel2/darwin/amd64/x.y.z/telepresence

# Apple silicon Macs
https://app.getambassador.io/download/tel2/darwin/arm64/x.y.z/telepresence
```


```
https://app.getambassador.io/download/tel2/linux/amd64/x.y.z/telepresence
```


```
https://app.getambassador.io/download/tel2/windows/amd64/x.y.z/telepresence
```

diff --git a/docs/v2.4/install/migrate-from-legacy.md b/docs/v2.4/install/migrate-from-legacy.md
deleted file mode 100644
index 0f227f2a..00000000
--- a/docs/v2.4/install/migrate-from-legacy.md
+++ /dev/null
@@ -1,109 +0,0 @@
# Migrate from legacy Telepresence

Telepresence (formerly referenced as Telepresence 2, which is the current major version) has different mechanics and requires a different mental model from [legacy Telepresence 1](https://www.telepresence.io/docs/v1/) when working with local instances of your services.

In legacy Telepresence, a pod running a service was swapped with a pod running the Telepresence proxy. This proxy received traffic intended for the service, and sent the traffic onward to the target workstation or laptop. We called this mechanism "swap-deployment".

In practice, this mechanism, while simple in concept, had some challenges. Losing the connection to the cluster would leave the deployment in an inconsistent state. Swapping the pods would take time.

Telepresence 2 introduces a [new architecture](../../reference/architecture/) built around "intercepts" that addresses these problems. With the new Telepresence, a sidecar proxy ("traffic agent") is injected onto the pod. The proxy then intercepts traffic intended for the pod and routes it to the workstation/laptop. The advantage of this approach is that the service is running at all times, and no swapping is used. Because of the proxy approach, Telepresence can also do personal intercepts: rather than re-routing all traffic to the laptop/workstation, it re-routes only the traffic designated as belonging to that user, so that multiple developers can intercept the same service at the same time without disrupting normal operation or each other.

Please see [the Telepresence quick start](../../quick-start/) for an introduction to running intercepts and [the intercept reference doc](../../reference/intercepts/) for a deep dive into intercepts.

## Using legacy Telepresence commands

First please ensure you've [installed Telepresence](../).

Telepresence is able to translate common legacy Telepresence commands into native Telepresence commands.
So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the Telepresence binary.
- -For example, say you have a deployment (`myserver`) that you want to swap deployment (equivalent to intercept in -Telepresence) with a python server, you could run the following command: - -``` -$ telepresence --swap-deployment myserver --expose 9090 --run python3 -m http.server 9090 -< help text > - -Legacy telepresence command used -Command roughly translates to the following in Telepresence: -telepresence intercept echo-easy --port 9090 -- python3 -m http.server 9090 -running... -Connecting to traffic manager... -Connected to context -Using Deployment myserver -intercepted - Intercept name : myserver - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:9090 - Intercepting : all TCP connections -Serving HTTP on :: port 9090 (http://[::]:9090/) ... -``` - -Telepresence will let you know what the legacy Telepresence command has mapped to and automatically -runs it. So you can get started with Telepresence today, using the commands you are used to -and it will help you learn the Telepresence syntax. - -### Legacy command mapping - -Below is the mapping of legacy Telepresence to Telepresence commands (where they exist and -are supported). - -| Legacy Telepresence Command | Telepresence Command | -|--------------------------------------------------|--------------------------------------------| -| --swap-deployment $workload | intercept $workload | -| --expose localPort[:remotePort] | intercept --port localPort[:remotePort] | -| --swap-deployment $workload --run-shell | intercept $workload -- bash | -| --swap-deployment $workload --run $cmd | intercept $workload -- $cmd | -| --swap-deployment $workload --docker-run $cmd | intercept $workload --docker-run -- $cmd | -| --run-shell | connect -- bash | -| --run $cmd | connect -- $cmd | -| --env-file,--env-json | --env-file, --env-json (haven't changed) | -| --context,--namespace | --context, --namespace (haven't changed) | -| --mount,--docker-mount | --mount, --docker-mount (haven't changed) | - -### Legacy Telepresence command limitations - -Some of the commands and flags from legacy Telepresence either didn't apply to Telepresence or -aren't yet supported in Telepresence. For some known popular commands, such as --method, -Telepresence will include output letting you know that the flag has gone away. For flags that -Telepresence can't translate yet, it will let you know that that flag is "unsupported". - -If Telepresence is missing any flags or functionality that is integral to your usage, please let us know -by [creating an issue](https://github.com/telepresenceio/telepresence/issues) and/or talking to us on our [Slack channel](https://a8r.io/Slack)! - -## Telepresence changes - -Telepresence installs a Traffic Manager in the cluster and Traffic Agents alongside workloads when performing intercepts (including -with `--swap-deployment`) and leaves them. If you use `--swap-deployment`, the intercept will be left once the process -dies, but the agent will remain. 
There's no harm in leaving the agent running alongside your service, but when you -want to remove them from the cluster, the following Telepresence command will help: -``` -$ telepresence uninstall --help -Uninstall telepresence agents and manager - -Usage: - telepresence uninstall [flags] { --agent |--all-agents | --everything } - -Flags: - -d, --agent uninstall intercept agent on specific deployments - -a, --all-agents uninstall intercept agent on all deployments - -e, --everything uninstall agents and the traffic manager - -h, --help help for uninstall - -n, --namespace string If present, the namespace scope for this CLI request -``` - -Since the new architecture deploys a Traffic Manager into the Ambassador namespace, please take a look at -our [rbac guide](../../reference/rbac) if you run into any issues with permissions while upgrading to Telepresence. diff --git a/docs/v2.4/install/qs-go-advanced.md b/docs/v2.4/install/qs-go-advanced.md deleted file mode 100644 index 8c432219..00000000 --- a/docs/v2.4/install/qs-go-advanced.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -description: "Create your complete Kubernetes development environment and use Telepresence to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Creating a local Go Kubernetes development environment - -This tutorial shows you how to use Ambassador Cloud to create an effective Kubernetes development environment to enable fast, local development with the ability to interact with services and dependencies that run in a remote Kubernetes cluster. - -For the hands-on part of this guide, you will build upon [this tutorial with the emojivoto application](../../quick-start/go/), which is written in Go. - -## Prerequisites - -To begin, you need a set of services that you can deploy to a Kubernetes cluster. These services must be: - -* [Containerized](https://www.getambassador.io/learn/kubernetes-glossary/container/). - - Best practices for [writing Dockerfiles](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/). - - Many modern code editors, such as [VS Code](https://code.visualstudio.com/docs/containers/overview) and [IntelliJ IDEA](https://code.visualstudio.com/docs/containers/overview), can automatically generate Dockerfiles. -* Have a Kubernetes manifest that can be used to successfully deploy your application to a Kubernetes cluster. This includes YAML config files, or Helm charts, or whatever method you prefer. - - Many modern code editors, such as VS Code, have [plugins](https://marketplace.visualstudio.com/items?itemName=ms-kubernetes-tools.vscode-kubernetes-tools) that will [automatically generate](https://marketplace.visualstudio.com/items?itemName=GoogleCloudTools.cloudcode) a large amount of the Service and Deployment configuration files. - - The kubectl command-line tool includes a number of [config generators](https://kubernetes.io/docs/reference/kubectl/conventions/#generators) for creating basic Service and Deployment files. - - For helm users, the [`helm create` command](https://helm.sh/docs/helm/helm_create/) can be used to create the directory and file scaffolding for your chart. -* Follow cloud native application architecture best practices. - - Design services using the [Twelve-Factor Application](https://12factor.net/) approach. 
  - Ensure that your services and ingress gateway include HTTP [header propagation](https://www.getambassador.io/learn/kubernetes-glossary/header-propagation/) for good observability and diagnostics. Many modern language-specific web frameworks support this out-of-the-box, and the [OpenTelemetry documentation](https://opentelemetry.lightstep.com/core-concepts/context-propagation/) also contains good guidance.

The emojivoto example you are exploring in the steps below follows all of these prerequisites.

## Deploy your application to a remote Kubernetes cluster

First, ensure that your entire application is running in a Kubernetes cluster and available for access either to your users or to yourself acting as a user.

Use your existing `kubectl apply`, `helm install`, or continuous deployment system to deploy your entire application to the remote cluster:

1. Ensure that you have set the correct KUBECONFIG in your local command line/shell in order to ensure your local tooling is interacting with the correct Kubernetes cluster. Verify this by executing `kubectl cluster-info` or `kubectl get svc`.
2. Deploy your application (using kubectl, helm or your CD system), and verify that the services are running with `kubectl get svc`.
3. Verify that you can access the running application by visiting the Ingress IP or domain name. We’ll refer to this as ${INGRESS_IP} from now on.

If you followed the [emojivoto application tutorial](../../quick-start/go/) referenced at the beginning of this guide, you will see that your Kubernetes cluster has all of the necessary services deployed and has the ingress configured to expose your application by way of an IP address.

## Create a local development container to modify a service

After you finish your deployment, you need to configure a copy of a single service and run it locally. This example shows you how to do this in a development container with a sample repository. Unlike a production container, a development container contains the full development toolchain and dependencies required to build and run your application.


1. Clone your code in your repository with `git clone `.
   For example: `git clone https://github.com/danielbryantuk/emojivoto.git`.
2. Change your directory to the source directory with `cd `.
   To follow the previous example, enter: `cd emojivoto-voting-svc/api`
3. Ensure that your development environment is configured to support the automatic reloading of the service when your source code changes.
   In the example, the Go application source code is being monitored for changes, and the application is rebuilt with [Air's live-reloading utility](https://github.com/cosmtrek/air).
4. Add a Dockerfile for your development.
   Alternatively, you can use a Cloud Native Buildpack, such as those provided by Google Cloud. The [Google Go buildpack](https://github.com/GoogleCloudPlatform/buildpacks) has live-reloading configured by default.
5. Next, test that the container is working properly. In the root directory of your source repo, enter:
`docker build -t example-dev-container:0.1 -f Dev.Dockerfile .`
If you ran the [emojivoto application example](../../quick-start/go/), the container has already been built for you and you can skip this step.
6. Run the development container and mount the current directory as a volume. This way, any code changes you make locally are synchronized into the container.
Enter: - `docker run -v $(pwd):/opt/emojivoto/emojivoto-voting-svc/api datawire/telepresence-emojivoto-go-demo` - Now, code changes you make locally trigger a reload of the application in the container. -7. Open the current directory with your source code in your IDE. Make a change to the source code and trigger a build/compilation. - The container logs show that the application has been reloaded. - -If you followed the [emojivoto application tutorial](../../quick-start/go/) referenced at the beginning of this guide, the emojivoto development container is already downloaded. When you examine the `docker run` command you executed, you can see an AMBASSADOR_API_KEY token included as an environment variable. Copy and paste this into the example command below. Clone the emojivoto code repo and run the container with the updated configuration to expose the application's ports locally and volume mount your local copy of the application source code into the container: -``` -$ git clone git@github.com:danielbryantuk/emojivoto.git -$ cd emojivoto-voting-svc/api -$ docker run -d -p8083:8083 -p8081:8081 --name voting-demo --cap-add=NET_ADMIN --device /dev/net/tun:/dev/net/tun --pull always --rm -it -e AMBASSADOR_API_KEY= -v ~/Library/Application\ Support:/root/.host_config -v $(pwd):/opt/emojivoto/emojivoto-voting-svc/api datawire/telepresence-emojivoto-go-demo -``` - -## Connect your local development environment to the remote cluster - -Once you have the development container running, you can integrate your local development environment and the remote cluster. This enables you to access your remote app and instantly see any local changes you have made using your development container. - -1. First, download the latest [Telepresence binary](../../install/) for your operating system and run `telepresence connect`. - Your local service is now able to interact with services and dependencies in your remote cluster. - For example, you can run `curl remote-service-name.namespace:port/path` and get an instant response locally in the same way you would in a remote cluster. -2. Extract the KUBECONFIG from your dev container from the [emojivoto application tutorial](../../quick-start/go/) and then connect your container to the remote cluster with Telepresence: - ``` - $ CONTAINER_ID=$(docker inspect --format="{{.Id}}" "/voting-demo") - $ docker cp $CONTAINER_ID:/opt/telepresence-demo-cluster.yaml ./emojivoto_k8s_context.yaml - ``` -3. Run `telepresence intercept your-service-name` to reroute traffic for the service you’re working on: - ``` - $ telepresence intercept voting --port 8081:8080 - ``` -4. Make a small change in your local code that will cause a visible change that you will be able to see when accessing your app. Build your service to trigger a reload within the container. -5. Now visit your ${INGRESS_IP} and view the change. - Notice the instant feedback of a local change combined with being able to access the remote dependencies! -6. Make another small change in your local code and build the application again. -Refresh your view of the app at ${INGRESS_IP}. - Notice that you didn’t need to re-deploy the container in the remote cluster to view your changes. Any request you make against the remote application that accesses your service will be routed to your local machine allowing you to instantly see the effects of changes you make to the code. -7. 
Now, put all these commands in a simple shell script, setup_dev_env.sh, which can auto-install Telepresence and configure your local development environment in one command. You can commit this script into your application’s source code repository and your colleagues can easily take advantage of this fast development loop you have created. An example script is included below, which follows the “[Do-nothing scripting](https://blog.danslimmon.com/2019/07/15/do-nothing-scripting-the-key-to-gradual-automation/)” format from Dan Slimmon:

   ```
   #!/bin/bash

   # global vars
   CONTAINER_ID=''

   check_init_config() {
     if [[ -z "${AMBASSADOR_API_KEY}" ]]; then
       # you will need to set the AMBASSADOR_API_KEY via the command line
       # export AMBASSADOR_API_KEY='NTIyOWExZDktYTc5...'
       echo 'AMBASSADOR_API_KEY is not currently defined. Please set the environment variable in the shell e.g.'
       echo 'export AMBASSADOR_API_KEY=NTIyOWExZDktYTc5...'
       exit 1
     fi
   }

   run_dev_container() {
     echo 'Running dev container (and downloading if necessary)'

     # check if dev container is already running and kill if so
     CONTAINER_ID=$(docker inspect --format="{{.Id}}" "/voting-demo" 2> /dev/null)
     if [ ! -z "$CONTAINER_ID" ]; then
       docker kill $CONTAINER_ID
     fi

     # run the dev container, exposing 8081 gRPC port and volume mounting code directory
     CONTAINER_ID=$(docker run -d -p8083:8083 -p8081:8081 --name voting-demo --cap-add=NET_ADMIN --device /dev/net/tun:/dev/net/tun --pull always --rm -it -e AMBASSADOR_API_KEY=$AMBASSADOR_API_KEY -v ~/Library/Application\ Support:/root/.host_config -v $(pwd):/opt/emojivoto/emojivoto-voting-svc/api datawire/telepresence-emojivoto-go-demo)
   }

   connect_to_k8s() {
     echo 'Extracting KUBECONFIG from container and connecting to cluster'
     until docker cp $CONTAINER_ID:/opt/telepresence-demo-cluster.yaml ./emojivoto_k8s_context.yaml > /dev/null 2>&1; do
       echo '.'
       sleep 1s
     done

     export KUBECONFIG=./emojivoto_k8s_context.yaml

     echo 'Connected to cluster. Listing services in default namespace'
     kubectl get svc
   }

   install_telepresence() {
     echo 'Configuring Telepresence'
     if ! command -v telepresence &> /dev/null; then
       echo "Installing Telepresence"
       sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/2.4.11/telepresence -o /usr/local/bin/telepresence
       sudo chmod a+x /usr/local/bin/telepresence
     else
       echo "Telepresence already installed"
     fi
   }

   connect_local_dev_env_to_remote() {
     export KUBECONFIG=./emojivoto_k8s_context.yaml
     echo 'Connecting local dev env to remote K8s cluster'
     telepresence intercept voting --port 8081:8080
   }

   open_editor() {
     echo 'Opening editor'

     # replace this line with your editor of choice, e.g. VS Code, IntelliJ
     code .
   }

   display_instructions_to_user () {
     echo ''
     echo 'INSTRUCTIONS FOR DEVELOPMENT'
     echo '============================'
     echo 'To set the correct Kubernetes context on this shell, please execute:'
     echo 'export KUBECONFIG=./emojivoto_k8s_context.yaml'
   }

   check_init_config
   run_dev_container
   connect_to_k8s
   install_telepresence
   connect_local_dev_env_to_remote
   open_editor
   display_instructions_to_user

   # happy coding!

   ```
8. Run the setup_dev_env.sh script locally. Use the $AMBASSADOR_API_KEY you created from Docker in the [emojivoto application tutorial](../../quick-start/go/) or in [Ambassador Cloud](https://app.getambassador.io/cloud/services/).
- ``` - export AMBASSADOR_API_KEY= - git clone git@github.com:danielbryantuk/emojivoto.git - cd emojivoto-voting-svc/api - ./setup_dev_env.sh - ``` - - If you are not using Mac OS and not using VS Code, you will need to update the script to download the correct Telepresence binary for your OS and open the correct editor, respectively - - -## Share the result of your local changes with others - -Once you have your local development environment configured for fast feedback, you can securely share access and the ability to view the changes made in your local service with your teammates and stakeholders. - -1. Leave any current Telepresence intercepts you have running: - `telepresence leave your-service-name` -2. Login to Ambassador Cloud using your GitHub account that is affiliated with your organization. This is important because in order to secure control access to your local changes only people with a GitHub account that shares the same organization will be able to view this. - Run `telepresence login`. -3. Run `telepresence intercept your-service-name` again to reroute traffic for the service you’re working on. This time you will be required to answer several questions about your ingress configuration. -4. Once the command completes, take the “previewURL” that was generated as part of the output and share this with your teammates. Ask them to access the application via this URL (rather than the regular application URL). -5. Make a small change in your local code that causes a visible change that you can see when accessing your app. Build your service to trigger a reload within the container. -6. Run the following three commands to generate a link to share with your teammates: - ``` - $ telepresence leave voting - $ telepresence login - $ telepresence intercept voting --port 8081:8080 - ``` -7. Ask your teammates to refresh their view of the application and instantly see the local changes you’ve made. - -## What's Next? - -Learn more about creating intercepts in your Telepresence environment with the [Intercept a service in your own environment](../../howtos/intercepts/) documentation. diff --git a/docs/v2.4/install/qs-java-advanced.md b/docs/v2.4/install/qs-java-advanced.md deleted file mode 100644 index 8aa0ce13..00000000 --- a/docs/v2.4/install/qs-java-advanced.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -description: "Create your complete Kubernetes development environment and use Telepresence to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -# Creating a local Kubernetes development environment - -This tutorial shows you how to use Ambassador Cloud to create an effective Kubernetes development environment to enable fast, local development with the ability to interact with services and dependencies that run in a remote Kubernetes cluster. - -## Prerequisites - -To begin, you need a set of services that you can deploy to a Kubernetes cluster. These services must be: - -* [Containerized](https://www.getambassador.io/learn/kubernetes-glossary/container/). - - Best practices for [writing Dockerfiles](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/). - - Many modern code editors, such as [VS Code](https://code.visualstudio.com/docs/containers/overview) and [IntelliJ IDEA](https://code.visualstudio.com/docs/containers/overview), can automatically generate Dockerfiles. -* Have a Kubernetes manifest that can be used to successfully deploy your application to a Kubernetes cluster. 
This includes YAML config files, or Helm charts, or whatever method you prefer.
  - Many modern code editors, such as VS Code, have [plugins](https://marketplace.visualstudio.com/items?itemName=ms-kubernetes-tools.vscode-kubernetes-tools) that will [automatically generate](https://marketplace.visualstudio.com/items?itemName=GoogleCloudTools.cloudcode) a large amount of the Service and Deployment configuration files.
  - The kubectl command-line tool includes a number of [config generators](https://kubernetes.io/docs/reference/kubectl/conventions/#generators) for creating basic Service and Deployment files.
  - For Helm users, the [`helm create` command](https://helm.sh/docs/helm/helm_create/) can be used to create the directory and file scaffolding for your chart.
* Follow cloud native application architecture best practices.
  - Design services using the [Twelve-Factor Application](https://12factor.net/) approach.
  - Ensure that your services and ingress gateway include HTTP [header propagation](https://www.getambassador.io/learn/kubernetes-glossary/header-propagation/) for good observability and diagnostics. Many modern language-specific web frameworks support this out-of-the-box, and the [OpenTelemetry documentation](https://opentelemetry.lightstep.com/core-concepts/context-propagation/) also contains good guidance.

## Deploy your application to a remote Kubernetes cluster

First, ensure that your entire application is running in a Kubernetes cluster and available for access either to your users or to yourself acting as a user.

Use your existing `kubectl apply`, `helm install`, or continuous deployment system to deploy your entire application to the remote cluster:

1. Ensure that you have set the correct KUBECONFIG in your local command line/shell in order to ensure your local tooling is interacting with the correct Kubernetes cluster. Verify this by executing `kubectl cluster-info` or `kubectl get svc`.
2. Deploy your application (using kubectl, helm or your CD system), and verify that the services are running with `kubectl get svc`.
3. Verify that you can access the running application by visiting the Ingress IP or domain name. We’ll refer to this as ${INGRESS_IP} from now on.

## Create a local development container to modify a service

After you finish your deployment, you need to configure a copy of a single service and run it locally. This example shows you how to do this in a development container with a sample repository. Unlike a production container, a development container contains the full development toolchain and dependencies required to build and run your application.


1. Clone your code in your repository with `git clone `.
   For example: `git clone https://github.com/danielbryantuk/gs-spring-boot.git`.
2. Change your directory to the source directory with `cd `.
   To follow the previous example, enter: `cd gs-spring-boot/complete`
3. Ensure that your development environment is configured to support the automatic reloading of the service when your source code changes.
   In the example Spring Boot app this is as simple as [adding the spring-boot-devtools dependency to the pom.xml file](https://docs.spring.io/spring-boot/docs/1.5.16.RELEASE/reference/html/using-boot-devtools.html).
4. Add a Dockerfile for your development.
   To distinguish this from your production Dockerfile, give the development Dockerfile a separate name, like “Dev.Dockerfile”.
- The following is an example for Java: - ```Java - FROM openjdk:16-alpine3.13 - - WORKDIR /app - - COPY .mvn/ .mvn - COPY mvnw pom.xml ./ - RUN ./mvnw dependency:go-offline - - COPY src ./src - - CMD ["./mvnw", "spring-boot:run"] - ``` -5. Next, test that the container is working properly. In the root directory of your source rep, enter: -`docker build -t example-dev-container:0.1 -f Dev.Dockerfile .` -6. Run the development container and mount the current directory as a volume. This way, any code changes you make locally are synchronized into the container. Enter: - `docker run -v $(pwd):/app example-dev-container:0.1` - Now, code changes you make locally trigger a reload of the application in the container. -7. Open the current directory with your source code in your IDE. Make a change to the source code and trigger a build/compilation. - The container logs show that the application has been reloaded. - -## Connect your local development environment to the remote cluster - -Once you have the development container running, you can integrate your local development environment and the remote cluster. This enables you to access your remote app and instantly see any local changes you have made using your development container. - -1. First, download the latest [Telepresence binary](../../install) for your operating system and run `telepresence connect`. - Your local service is now able to interact with services and dependencies in your remote cluster. - For example, you can run `curl remote-service-name.namespace:port/path` and get an instant response locally in the same way you would in a remote cluster. -2. Run `telepresence intercept your-service-name` to reroute traffic for the service you’re working on. -3. Make a small change in your local code that will cause a visible change that you will be able to see when accessing your app. Build your service to trigger a reload within the container. -4. Now visit your ${INGRESS_IP} and view the change. - Notice the instant feedback of a local change combined with being able to access the remote dependencies! -5. Make another small change in your local code and build the application again. -Refresh your view of the app at ${INGRESS_IP}. - Notice that you didn’t need to re-deploy the container in the remote cluster to view your changes. Any request you make against the remote application that accesses your service will be routed to your local machine allowing you to instantly see the effects of changes you make to the code. -6. Now, put all these commands in a simple shell script, setup-dev-env.sh, which can auto-install Telepresence and configure your local development environment in one command. You can commit this script into your application’s source code repository and your colleagues can easily take advantage of this fast development loop you have created. An example script is included below: - ``` - # deploy your services to the remote cluster - echo `Add config to deploy the application to your remote cluster via kubectl or helm etc` - - # clone the service you want to work on - git clone https://github.com/spring-guides/gs-spring-boot.git - cd gs-spring-boot/complete - - # build local dev container - docker build -t example-dev-container:0.1 -f Dev.Dockerfile . 
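    # (editorial note, not part of the original script: the -f flag above selects
    #  the development Dockerfile from step 4, and the example-dev-container:0.1
    #  tag must match the image name used by the `docker run` command below)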
- - # run local dev container - # the logs can be viewed by the `docker logs -f ` and the container id can found via `docker container ls` - docker run -d -v $(pwd):/app example-dev-container:0.1 - - # download Telepresence and install (instructions for non Mac users: https://www.getambassador.io/docs/telepresence/v2.4/install/) - sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/2.4.11/telepresence -o /usr/local/bin/telepresence - sudo chmod a+x /usr/local/bin/telepresence - - # connect your local dev env to the remote cluster - telepresence connect - - # re-route remote traffic to your local service - # telepresence intercept your-service-name - - # happy coding! - - ``` -## Share the result of your local changes with others - -Once you have your local development environment configured for fast feedback, you can securely share access and the ability to view the changes made in your local service with your teammates and stakeholders. - -1. Leave any current Telepresence intercepts you have running: - `telepresence leave your-service-name` -2. Login to Ambassador Cloud using your GitHub account that is affiliated with your organization. This is important because in order to secure control access to your local changes only people with a GitHub account that shares the same organization will be able to view this. - Run `telepresence login`. -3. Run `telepresence intercept your-service-name` again to reroute traffic for the service you’re working on. This time you will be required to answer several questions about your ingress configuration. -4. Once the command completes, take the “previewURL” that was generated as part of the output and share this with your teammates. Ask them to access the application via this URL (rather than the regular application URL). -5. Make a small change in your local code that causes a visible change that you can see when accessing your app. Build your service to trigger a reload within the container. -6. Ask your teammates to refresh their view of the application and instantly see the local changes you’ve made. - -## What's Next? - -Now that you've created a complete Kubernetes development environment, learn more about how to [manage your environment in Ambassador Cloud](https://www.getambassador.io/docs/cloud/latest/service-catalog/howtos/cells) or how to [create Preview URLs in Telepresence](https://www.getambassador.io/docs/telepresence/v2.4/howtos/preview-urls/). diff --git a/docs/v2.4/install/upgrade.md b/docs/v2.4/install/upgrade.md deleted file mode 100644 index 10d0ca13..00000000 --- a/docs/v2.4/install/upgrade.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -description: "How to upgrade your installation of Telepresence and install previous versions." ---- - -import Platform from '@src/components/Platform'; - -# Upgrade Process -The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. - - - - -```shell -# Intel Macs - -# Upgrade via brew: -brew upgrade datawire/blackbird/telepresence - -# OR upgrade manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. 
Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path by passing in -Path C:\my\custom\path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -After upgrading your CLI you must stop any live Telepresence processes by issuing `telepresence quit`, then upgrade the Traffic Manager by running `telepresence connect` - -**Note** that if the Traffic Manager has been installed via Helm, `telepresence connect` will never upgrade it. If you wish to upgrade a Traffic Manager that was installed via the Helm chart, please see the [the Helm documentation](../helm#upgrading-the-traffic-manager) diff --git a/docs/v2.4/quick-start/TelepresenceQuickStartLanding.js b/docs/v2.4/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 537a6325..00000000 --- a/docs/v2.4/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,126 +0,0 @@ -import React from 'react'; - -import Icon from '../../../src/components/Icon'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */ -const Box = ({ children, color = 'blue', withConnector = false }) => ( - <> - {withConnector && ( -
- -
- )} -
{children}
- -); - -const TelepresenceQuickStartLanding = () => ( -
-

- Telepresence -

-

- Explore the use cases of Telepresence with a free remote Kubernetes - cluster, or dive right in using your own. -

- -
-
-
-

- Use Our Free Demo Cluster -

-

- See how Telepresence works without having to mess with your - production environments. -

-
- -

6 minutes

-

Integration Testing

-

- See how changes to a single service impact your entire application - without having to run your entire app locally. -

- - GET STARTED{' '} - - -
- -

5 minutes

-

Fast code changes

-

- Make changes to your service locally and see the results instantly, - without waiting for containers to build. -

- - GET STARTED{' '} - - -
-
-
-
-

- Use Your Cluster -

-

- Understand how Telepresence fits in to your Kubernetes development - workflow. -

-
- -

10 minutes

-

Intercept your service in your cluster

-

- Query services only exposed in your cluster's network. Make changes - and see them instantly in your K8s environment. -

- - GET STARTED{' '} - - -
-
-
- -
-

Watch the Demo

-
-
-

- See Telepresence in action in our 3-minute demo - video that you can share with your teammates. -

-
    -
  • Instant feedback loops
  • -
  • Infinite-scale development environments
  • -
  • Access to your favorite local tools
  • -
  • Easy collaborative development with teammates
  • -
-
-
- -
-
-
-
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/v2.4/quick-start/demo-node.md b/docs/v2.4/quick-start/demo-node.md deleted file mode 100644 index 90a2e2f8..00000000 --- a/docs/v2.4/quick-start/demo-node.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.4/quick-start/demo-node/) diff --git a/docs/v2.4/quick-start/demo-react.md b/docs/v2.4/quick-start/demo-react.md deleted file mode 100644 index 2e0fc40a..00000000 --- a/docs/v2.4/quick-start/demo-react.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.4/quick-start/demo-react/) diff --git a/docs/v2.4/quick-start/go.md b/docs/v2.4/quick-start/go.md deleted file mode 100644 index 04704fc3..00000000 --- a/docs/v2.4/quick-start/go.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.4/quick-start/go/) diff --git a/docs/v2.4/quick-start/index.md b/docs/v2.4/quick-start/index.md deleted file mode 100644 index f2305d72..00000000 --- a/docs/v2.4/quick-start/index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- - description: Telepresence Quick Start. ---- - -import TelepresenceQuickStartLanding from './TelepresenceQuickStartLanding'; - - diff --git a/docs/v2.4/quick-start/qs-cards.js b/docs/v2.4/quick-start/qs-cards.js deleted file mode 100644 index 0d8c7226..00000000 --- a/docs/v2.4/quick-start/qs-cards.js +++ /dev/null @@ -1,69 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - - Create a Local K8s Dev Environment - - - - Read the advanced guide on how to create your own complete Kubernetes development environment. - - - - - - - - Collaborating - - - - Use preview URLS to collaborate with your colleagues and others - outside of your organization. - - - - - - - - Outbound Sessions - - - - While connected to the cluster, your laptop can interact with - services as if it was another pod in the cluster. - - - - -
- ); -} diff --git a/docs/v2.4/quick-start/qs-go.md b/docs/v2.4/quick-start/qs-go.md deleted file mode 100644 index c2514635..00000000 --- a/docs/v2.4/quick-start/qs-go.md +++ /dev/null @@ -1,399 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General.
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command.
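-
-   If you want an extra sanity check at this point, the CLI can also report its own state. This is an optional sketch; the exact output of both commands varies between Telepresence versions:
-
-   ```shell
-   # Confirm the local daemons are running and connected to your cluster.
-   telepresence status
-
-   # Confirm the client version (this guide assumes 2.0.3 or newer).
-   telepresence version
-   ```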
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy.
- With Telepresence, these changes happen instantly.
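-
-If you'd rather make the same edit from a second terminal instead of an editor, a one-liner works too. This is a hypothetical sketch that assumes the color is stored as the string literal `"blue"` in `main.go` (on macOS, use `sed -i ''`):
-
-```shell
-cd edgey-corp-go/DataProcessingService
-
-# Swap the "blue" literal for "orange"; Fresh picks up the change and reloads the server.
-sed -i 's/"blue"/"orange"/' main.go
-```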
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## Create a complete development environment using this demo application - -Apply what you've learned from this guide and employ the Emojivoto application in your own local development environment. See the [Creating a local Kubernetes development environment](../../install/qs-go-advanced/) page to learn more. - -## What's Next? - - diff --git a/docs/v2.4/quick-start/qs-java.md b/docs/v2.4/quick-start/qs-java.md deleted file mode 100644 index 26b60de1..00000000 --- a/docs/v2.4/quick-start/qs-java.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." 
---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-
- Contents
-
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General.
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command.
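-
-   If you want an extra sanity check at this point, the CLI can also report its own state. This is an optional sketch; the exact output of both commands varies between Telepresence versions:
-
-   ```shell
-   # Confirm the local daemons are running and connected to your cluster.
-   telepresence status
-
-   # Confirm the client version (this guide assumes 2.0.3 or newer).
-   telepresence version
-   ```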
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy.
- With Telepresence, these changes happen instantly.
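-
-If you'd rather make the same edit from a second terminal instead of an editor, here is a hypothetical sketch, assuming the property is written as `app.default.color=blue` (on macOS, use `sed -i ''`):
-
-```shell
-cd edgey-corp-java/DataProcessingService
-
-# Flip the default color; as noted above, stop and re-run `mvn spring-boot:run` afterwards.
-sed -i 's/^app.default.color=blue$/app.default.color=orange/' src/main/resources/application.properties
-```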
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.4/quick-start/qs-node.md b/docs/v2.4/quick-start/qs-node.md deleted file mode 100644 index 3280f10a..00000000 --- a/docs/v2.4/quick-start/qs-node.md +++ /dev/null @@ -1,383 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-
- Contents
-
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General.
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command.
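-
-   If you want an extra sanity check at this point, the CLI can also report its own state. This is an optional sketch; the exact output of both commands varies between Telepresence versions:
-
-   ```shell
-   # Confirm the local daemons are running and connected to your cluster.
-   telepresence status
-
-   # Confirm the client version (this guide assumes 2.0.3 or newer).
-   telepresence version
-   ```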
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy.
- With Telepresence, these changes happen instantly.
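-
-If you'd rather make the same edit from a second terminal instead of an editor, a one-liner works too. This is a hypothetical sketch that targets line 6 of `app.js`, as described above (on macOS, use `sed -i ''`):
-
-```shell
-cd edgey-corp-nodejs/DataProcessingService
-
-# Change "blue" to "orange" on line 6; the Node server reloads automatically.
-sed -i '6s/blue/orange/' app.js
-```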
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.4/quick-start/qs-python-fastapi.md b/docs/v2.4/quick-start/qs-python-fastapi.md deleted file mode 100644 index 3360261e..00000000 --- a/docs/v2.4/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,380 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-
- Contents
-
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General.
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command.
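-
-   If you want an extra sanity check at this point, the CLI can also report its own state. This is an optional sketch; the exact output of both commands varies between Telepresence versions:
-
-   ```shell
-   # Confirm the local daemons are running and connected to your cluster.
-   telepresence status
-
-   # Confirm the client version (this guide assumes 2.0.3 or newer).
-   telepresence version
-   ```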
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy.
- With Telepresence, these changes happen instantly.
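-
-If you'd rather make the same edit from a second terminal instead of an editor, here is a hypothetical sketch that targets line 17 of `app.py`, where the guide says `DEFAULT_COLOR` is defined (on macOS, use `sed -i ''`):
-
-```shell
-cd edgey-corp-python-fastapi/DataProcessingService
-
-# Change DEFAULT_COLOR from blue to orange on line 17; the server reloads automatically.
-sed -i '17s/blue/orange/' app.py
-```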
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.4/quick-start/qs-python.md b/docs/v2.4/quick-start/qs-python.md deleted file mode 100644 index 481487c7..00000000 --- a/docs/v2.4/quick-start/qs-python.md +++ /dev/null @@ -1,391 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-
- Contents
-
- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General.
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command.
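-
-   If you want an extra sanity check at this point, the CLI can also report its own state. This is an optional sketch; the exact output of both commands varies between Telepresence versions:
-
-   ```shell
-   # Confirm the local daemons are running and connected to your cluster.
-   telepresence status
-
-   # Confirm the client version (this guide assumes 2.0.3 or newer).
-   telepresence version
-   ```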
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server. -Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install flask requests && python app.py - - Collecting flask - ... - Welcome to the DataServiceProcessingPythonService! - ... - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Python server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/v2.4/quick-start/telepresence-quickstart-landing.less b/docs/v2.4/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index 37304255..00000000 --- a/docs/v2.4/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,161 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1 { - color: @blue-dark; - font-weight: normal; - letter-spacing: 0.25px; - font-size: 33px; - } - p { - font-size: 0.875rem; - line-height: 24px; - margin: 0; - padding: 0; - } - - .demo-cluster-container { - display: grid; - margin: 40px 0; - grid-template-columns: repeat(2, 1fr); - column-gap: 40px; - @media screen and (max-width: 720px) { - grid-template-columns: repeat(1, 1fr); - row-gap: 50px; - } - } - .main-title-container { - display: flex; - flex-direction: column; - align-items: center; - p { - text-align: center; - font-size: 0.875rem; - } - } - h2.title { - font-size: 1.5rem; - color: @black; - font-weight: normal; - margin: 0 0 10px 0; - padding: 0; - &.underlined { - padding-bottom: 2px; - border-bottom: 3px solid @grey-separator; - text-align: center; - } - strong { - font-weight: 600; - } - } - .reading-time { - color: #7c7c87; - margin: 0; - } - .get-started { - font-size: 0.875rem; - font-weight: 600; - letter-spacing: 1.25px; - display: flex; - align-items: center; - margin: 20px 20px 10px; - text-decoration: none; - &.green { - color: @green; - } - &.green:hover { - color: @green-dark; - } - &.blue { - color: @blue; - } - &.blue:hover { - color: @blue-dark; - } - } - - .box-container { - border: 1.5px solid @grey-separator; - border-radius: 5px; - padding: 10px; - position: relative; - &::before { - content: ''; - position: absolute; - width: 14px; - height: 14px; - border-radius: 50%; - top: 0; - left: 50%; - transform: translate(-50%, -50%); - } - &.green::before { - background: @green; - box-shadow: 0 0 0 5px #00c05b45; - } - &.blue::before { - background: @blue; - box-shadow: 0 0 0 5px #0066ff45; - } - p { - font-size: 0.875rem; - line-height: 24px; - padding: 0; - } - } - .connector-container { - display: flex; - justify-content: center; - span { - background: @grey-separator; - width: 1.5px; - height: 37px; - } - } - - .telepresence-video { - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - h2.telepresence-video-title { - padding: 0; - margin: 0; - } - - strong { - color: @blue; - } - } - - .video-section { - display: grid; - grid-template-columns: 1fr 2fr; - column-gap: 10px; - @media screen and (max-width: 1400px) { - grid-template-columns: 1fr; - } - ul { - font-size: 14px; - margin: 0 10px 6px 0; - } - .video-container { - position: relative; - padding-bottom: 56.25%; // 16:9 aspect ratio - height: 0; - iframe { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - } - } - } -} diff --git a/docs/v2.4/redirects.yml b/docs/v2.4/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/v2.4/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/v2.4/reference/architecture.md b/docs/v2.4/reference/architecture.md deleted file mode 100644 index 47facb0b..00000000 --- a/docs/v2.4/reference/architecture.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: "How Telepresence works to intercept traffic from your Kubernetes 
cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `kubectl describe -pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. diff --git a/docs/v2.4/reference/client.md b/docs/v2.4/reference/client.md deleted file mode 100644 index 1fe86a1c..00000000 --- a/docs/v2.4/reference/client.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." 
---- - -# Client reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. -You can append `--help` to each command below to get even more information about its usage. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | -| [`login`](login) | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out out of Ambassador Cloud | -| `license` | Formats a license from Ambassdor Cloud into a secret that can be [applied to your cluster](../cluster-config#add-license-to-cluster) if you require features of the extension in an air-gapped environment| -| `status` | Shows the current connectivity status | -| `quit` | Tell Telepresence daemons to quit | -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000`. A special flag `--docker-run` can be used to run the local instance [in a docker container](../docker-run). | -| `leave` | Stops an active intercept: `telepresence leave hello` | -| `preview` | Create or remove [preview URLs](../../howtos/preview-urls) for existing intercepts: `telepresence preview create ` | -| `loglevel` | Temporarily change the log-level of the traffic-manager, traffic-agents, and user and root daemons | -| `gather-logs` | Gather logs from traffic-manager, traffic-agents, user, and root daemons, and export them into a zip file that can be shared with others or included with a github issue. Use `--get-pod-yaml` to include the yaml for the `traffic-manager` and `traffic-agent`s. Use `--anonymize` to replace the actual pod names + namespaces used for the `traffic-manager` and pods containing `traffic-agent`s in the logs. | -| `version` | Show version of Telepresence CLI + Traffic-Manager (if connected) | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. 
-| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `current-cluster-id` | Get cluster ID for your kubernetes cluster, used for [configuring license](../cluster-config#add-license-to-cluster) in an air-gapped environment | -| `test-vpn` | Run a [configuration check](../vpn#the-test-vpn-command) on a VPN setup | diff --git a/docs/v2.4/reference/client/login.md b/docs/v2.4/reference/client/login.md deleted file mode 100644 index 269a240d..00000000 --- a/docs/v2.4/reference/client/login.md +++ /dev/null @@ -1,53 +0,0 @@ -# Telepresence Login - -```console -$ telepresence login --help -Authenticate to Ambassador Cloud - -Usage: - telepresence login [flags] - -Flags: - --apikey string Static API key to use instead of performing an interactive login -``` - -## Description - -Use `telepresence login` to explicitly authenticate with [Ambassador -Cloud](https://www.getambassador.io/docs/cloud). Unless the -[`skipLogin` option](../../config) is set, other commands will -automatically invoke the `telepresence login` interactive login -procedure as nescessary, so it is rarely nescessary to explicitly run -`telepresence login`; it should only be truly nescessary to explictly -run `telepresence login` when you require a non-interactive login. - -The normal interactive login procedure involves launching a web -browser, a user interacting with that web browser, and finally having -the web browser make callbacks to the local Telepresence process. If -it is not possible to do this (perhaps you are using a headless remote -box via SSH, or are using Telepresence in CI), then you may instead -have Ambassador Cloud issue an API key that you pass to `telepresence -login` with the `--apikey` flag. - -## Acquiring an API key - -1. Log in to Ambassador Cloud at https://app.getambassador.io/ . - -2. Click on your profile icon in the upper-left: ![Screenshot with the - mouse pointer over the upper-left profile icon](./apikey-2.png) - -3. Click on the "API Keys" menu button: ![Screenshot with the mouse - pointer over the "API Keys" menu button](./apikey-3.png) - -4. Click on the "generate new key" button in the upper-right: - ![Screenshot with the mouse pointer over the "generate new key" - button](./apikey-4.png) - -5. Enter a description for the key (perhaps the name of your laptop, - or perhaps the "CI"), and click "generate api key" to create it. - -You may now pass the API key as `KEY` to `telepresence login --apikey=KEY`. - -Telepresence will use that "master" API key to create narrower keys -for different components of Telepresence. You will see these appear -in the Ambassador Cloud web interface. 
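Putting the above together, a non-interactive login is a single command; a minimal sketch, where the key value is a placeholder and the exact output may differ:

```console
$ telepresence login --apikey=<API_KEY>
Login successful.
```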
diff --git a/docs/v2.4/reference/client/login/apikey-2.png b/docs/v2.4/reference/client/login/apikey-2.png deleted file mode 100644 index 1379502a..00000000 Binary files a/docs/v2.4/reference/client/login/apikey-2.png and /dev/null differ diff --git a/docs/v2.4/reference/client/login/apikey-3.png b/docs/v2.4/reference/client/login/apikey-3.png deleted file mode 100644 index 4559b784..00000000 Binary files a/docs/v2.4/reference/client/login/apikey-3.png and /dev/null differ diff --git a/docs/v2.4/reference/client/login/apikey-4.png b/docs/v2.4/reference/client/login/apikey-4.png deleted file mode 100644 index 25c6581a..00000000 Binary files a/docs/v2.4/reference/client/login/apikey-4.png and /dev/null differ diff --git a/docs/v2.4/reference/cluster-config.md b/docs/v2.4/reference/cluster-config.md deleted file mode 100644 index 48aaa6f9..00000000 --- a/docs/v2.4/reference/cluster-config.md +++ /dev/null @@ -1,312 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; -import { ClusterConfig } from '@src/components/Docs/Telepresence'; - -# Cluster-side configuration - -For the most part, Telepresence doesn't require any special -configuration in the cluster and can be used right away in any -cluster (as long as the user has adequate [RBAC permissions](../rbac) -and the cluster's server version is `1.17.0` or higher). - -However, some advanced features do require some configuration in the -cluster. - -## TLS - -In this example, other applications in the cluster expect to speak TLS to your -intercepted application (perhaps you're using a service-mesh that does -mTLS). - -In order to use `--mechanism=http` (or any features that imply -`--mechanism=http`) you need to tell Telepresence about the TLS -certificates in use. - -Tell Telepresence about the certificates in use by adjusting your -[workload's](../intercepts/#supported-workloads) Pod template to set a couple of -annotations on the intercepted Pods: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ "getambassador.io/inject-terminating-tls-secret": "your-terminating-secret" # optional -+ "getambassador.io/inject-originating-tls-secret": "your-originating-secret" # optional - spec: -+ serviceAccountName: "your-account-that-has-rbac-to-read-those-secrets" - containers: -``` - -- The `getambassador.io/inject-terminating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS server - certificate to use for decrypting and responding to incoming - requests. - - When Telepresence modifies the Service and workload port - definitions to point at the Telepresence Agent sidecar's port - instead of your application's actual port, the sidecar will use this - certificate to terminate TLS. - -- The `getambassador.io/inject-originating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS - client certificate to use for communicating with your application. - - You will need to set this if your application expects incoming - requests to speak TLS (for example, your - code expects to handle mTLS itself instead of letting a service-mesh - sidecar handle mTLS for it, or the port definition that Telepresence - modified pointed at the service-mesh sidecar instead of at your - application). - - If you do set this, you should to set it to the - same client certificate Secret that you configure the Ambassador - Edge Stack to use for mTLS. - -It is only possible to refer to a Secret that is in the same Namespace -as the Pod. 
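For reference, a terminating secret such as the `your-terminating-secret` named in the annotations above is an ordinary Kubernetes TLS Secret; a minimal sketch of creating one in the workload's namespace (the namespace and certificate file paths are placeholders):

```console
$ kubectl -n your-namespace create secret tls your-terminating-secret \
    --cert=server.crt --key=server.key
```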
- -The Pod will need to have permission to `get` and `watch` each of -those Secrets. - -Telepresence understands `type: kubernetes.io/tls` Secrets and -`type: istio.io/key-and-cert` Secrets; as well as `type: Opaque` -Secrets that it detects to be formatted as one of those types. - -## Air gapped cluster - -If your cluster is on an isolated network such that it cannot -communicate with Ambassador Cloud, then some additional configuration -is required to acquire a license key in order to use personal -intercepts. - -### Create a license - -1. - -2. Generate a new license (if one doesn't already exist) by clicking *Generate New License*. - -3. You will be prompted for your Cluster ID. Ensure your -kubeconfig context is using the cluster you want to create a license for then -run this command to generate the Cluster ID: - - ``` - $ telepresence current-cluster-id - - Cluster ID: - ``` - -4. Click *Generate API Key* to finish generating the license. - -5. On the licenses page, download the license file associated with your cluster. - -### Add license to cluster -There are two separate ways you can add the license to your cluster: manually creating and deploying -the license secret or having the helm chart manage the secret - -You only need to do one of the two options. - -#### Manual deploy of license secret - -1. Use this command to generate a Kubernetes Secret config using the license file: - - ``` - $ telepresence license -f - - apiVersion: v1 - data: - hostDomain: - license: - kind: Secret - metadata: - creationTimestamp: null - name: systema-license - namespace: ambassador - ``` - -2. Save the output as a YAML file and apply it to your -cluster with `kubectl`. - -3. When deploying the `traffic-manager` chart, you must add the additional values when running `helm install` by putting -the following into a file (for the example we'll assume it's called license-values.yaml) - - ``` - licenseKey: - # This mounts the secret into the traffic-manager - create: true - secret: - # This tells the helm chart not to create the secret since you've created it yourself - create: false - ``` - -4. Install the helm chart into the cluster - - ``` - helm install traffic-manager -n ambassador datawire/telepresence --create-namespace -f license-values.yaml - ``` - -5. Ensure that you have the docker image for the Smart Agent (datawire/ambassador-telepresence-agent:1.11.0) -pulled and in a registry your cluster can pull from. - -6. Have users use the `images` [config key](../config/#images) keys so telepresence uses the aforementioned image for their agent. - -#### Helm chart manages the secret - -1. Get the jwt token from the downloaded license file - - ``` - $ cat ~/Downloads/ambassador.License_for_yourcluster - eyJhbGnotarealtoken.butanexample - ``` - -2. Create the following values file, substituting your real jwt token in for the one used in the example below. -(for this example we'll assume the following is placed in a file called license-values.yaml) - - ``` - licenseKey: - # This mounts the secret into the traffic-manager - create: true - # This is the value from the license file you download. this value is an example and will not work - value: eyJhbGnotarealtoken.butanexample - secret: - # This tells the helm chart to create the secret - create: true - ``` - -3. 
Install the helm chart into the cluster - - ``` - helm install traffic-manager charts/telepresence -n ambassador --create-namespace -f license-values.yaml - ``` - -Users will now be able to use preview intercepts with the -`--preview-url=false` flag. Even with the license key, preview URLs -cannot be used without enabling direct communication with Ambassador -Cloud, as Ambassador Cloud is essential to their operation. - -If using Helm to install the server-side components, see the chart's [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence) to learn how to configure the image registry and license secret. - -Have clients use the [skipLogin](../config/#cloud) key to ensure the cli knows it is operating in an -air-gapped environment. - -## Mutating Webhook - -By default, Telepresence updates the intercepted workload (Deployment, StatefulSet, ReplicaSet) -template to add the [Traffic Agent](../architecture/#traffic-agent) sidecar container and update the -port definitions. If you use GitOps workflows (with tools like ArgoCD) to automatically update your -cluster so that it reflects the desired state from an external Git repository, this behavior can make -your workload out of sync with that external desired state. - -To solve this issue, you can use Telepresence's Mutating Webhook alternative mechanism. Intercepted -workloads will then stay untouched and only the underlying pods will be modified to inject the Traffic -Agent sidecar container and update the port definitions. - -Simply add the `telepresence.getambassador.io/inject-traffic-agent: enabled` annotation to your -workload template's annotations: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ telepresence.getambassador.io/inject-traffic-agent: enabled - spec: - containers: -``` - -### Service Port Annotation - -A service port annotation can be added to the workload to make the Mutating Webhook select a specific port -in the service. This is necessary when the service has multiple ports. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-port: https - spec: - containers: -``` - -### Service Name Annotation - -A service name annotation can be added to the workload to make the Mutating Webhook select a specific Kubernetes service. -This is necessary when the workload is exposed by multiple services. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-name: my-service - spec: - containers: -``` - -### Note on Numeric Ports - -If the targetPort of your intercepted service is pointing at a port number, in addition to -injecting the Traffic Agent sidecar, Telepresence will also inject an initContainer that will -reconfigure the pod's firewall rules to redirect traffic to the Traffic Agent. - - -Note that this initContainer requires `NET_ADMIN` capabilities. -If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. - - - -This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. 
-To enable running as GID 7777 on a specific openshift namespace, run: -oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE - - -If you need to use numeric ports without the aforementioned capabilities, you can [manually install the agent](../intercepts/manual-agent) - -For example, the following service is using a numeric port, so Telepresence would inject an initContainer into it: -```yaml -apiVersion: v1 -kind: Service -metadata: - name: your-service -spec: - type: ClusterIP - selector: - service: your-service - ports: - - port: 80 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: your-service - labels: - service: your-service -spec: - replicas: 1 - selector: - matchLabels: - service: your-service - template: - metadata: - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled - labels: - service: your-service - spec: - containers: - - name: your-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 -``` diff --git a/docs/v2.4/reference/config.md b/docs/v2.4/reference/config.md deleted file mode 100644 index 3d42b005..00000000 --- a/docs/v2.4/reference/config.md +++ /dev/null @@ -1,298 +0,0 @@ -# Laptop-side configuration - -## Global Configuration -Telepresence uses a `config.yml` file to store and change certain global configuration values that will be used for all clusters you use Telepresence with. The location of this file varies based on your OS: - -* macOS: `$HOME/Library/Application Support/telepresence/config.yml` -* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` -* Windows: `%APPDATA%\telepresence\config.yml` - -For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence. - -### Values - -The config file currently supports values for the `timeouts`, `logLevels`, `images`, `cloud`, and `grpc` keys. - -Here is an example configuration to show you the conventions of how Telepresence is configured: -**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist** - -```yaml -timeouts: - agentInstall: 1m - intercept: 10s -logLevels: - userDaemon: debug -images: - registry: privateRepo # This overrides the default docker.io/datawire repo - agentImage: ambassador-telepresence-agent:1.8.0 # This overrides the agent image to inject when intercepting -cloud: - refreshMessages: 24h # Refresh messages from cloud every 24 hours instead of the default, which is 1 week. -grpc: - maxReceiveSize: 10Mi -telepresenceAPI: - port: 9980 -intercept: - appProtocolStrategy: portName - defaultPort: "8088" -``` - -#### Timeouts - -Values for `timeouts` are all durations either as a number of seconds -or as a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings -can be fractional (`1.5h`) or combined (`2h45m`). 
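For example, a `timeouts` snippet in `config.yml` mixing the plain-number and duration-string forms might look like this (the values are illustrative, not recommendations):

```yaml
timeouts:
  agentInstall: 120            # plain number of seconds
  trafficManagerConnect: 45s   # duration string
  helm: 2m30s                  # combined duration string
```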
- -These are the valid fields for the `timeouts` key: - -| Field | Description | Type | Default | -|-------------------------|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|------------| -| `agentInstall` | Waiting for Traffic Agent to be installed | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | -| `apply` | Waiting for a Kubernetes manifest to be applied | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 1 minute | -| `clusterConnect` | Waiting for cluster to be connected | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | -| `intercept` | Waiting for an intercept to become active | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | -| `proxyDial` | Waiting for an outbound connection to be established | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | -| `trafficManagerConnect` | Waiting for the Traffic Manager API to connect for port fowards | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | -| `trafficManagerAPI` | Waiting for connection to the gPRC API after `trafficManagerConnect` is successful | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 15 seconds | -| `helm` | Waiting for Helm operations (e.g. `install`) on the Traffic Manager | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | - -#### Log Levels - -Values for the `logLevels` fields are one of the following strings, -case insensitive: - - - `trace` - - `debug` - - `info` - - `warning` or `warn` - - `error` - -For whichever log-level you select, you will get logs labeled with that level and of higher severity. -(e.g. if you use `info`, you will also get logs labeled `error`. You will NOT get logs labeled `debug`. - -These are the valid fields for the `logLevels` key: - -| Field | Description | Type | Default | -|--------------|---------------------------------------------------------------------|---------------------------------------------|---------| -| `userDaemon` | Logging level to be used by the User Daemon (logs to connector.log) | [loglevel][logrus-level] [string][yaml-str] | debug | -| `rootDaemon` | Logging level to be used for the Root Daemon (logs to daemon.log) | [loglevel][logrus-level] [string][yaml-str] | info | - -#### Images -Values for `images` are strings. These values affect the objects that are deployed in the cluster, -so it's important to ensure users have the same configuration. - -Additionally, you can deploy the server-side components with [Helm](../../install/helm), to prevent them -from being overridden by a client's config and use the [mutating-webhook](../cluster-config/#mutating-webhook) -to handle installation of the `traffic-agents`. 
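For example, pointing clients at a private mirror might look like the snippet below; `registry.example.com/telepresence` is a placeholder, not a real registry:

```yaml
images:
  registry: registry.example.com/telepresence
  webhookRegistry: registry.example.com/telepresence
```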
- -These are the valid fields for the `images` key: - -| Field | Description | Type | Default | -|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------|----------------------| -| `registry` | Docker registry to be used for installing the Traffic Manager and default Traffic Agent. If not using a helm chart to deploy server-side objects, changing this value will create a new traffic-manager deployment when using Telepresence commands. Additionally, changing this value will update installed default `traffic-agents` to use the new registry when creating a new intercept. | Docker registry name [string][yaml-str] | `docker.io/datawire` | -| `agentImage` | `$registry/$imageName:$imageTag` to use when installing the Traffic Agent. Changing this value will update pre-existing `traffic-agents` to use this new image. *The `registry` value is not used for the `traffic-agent` if you have this value set.* | qualified Docker image name [string][yaml-str] | (unset) | -| `webhookRegistry` | The container `$registry` that the [Traffic Manager](../cluster-config/#mutating-webhook) will use with the `webhookAgentImage` *This value is only used if a new `traffic-manager` is deployed* | Docker registry name [string][yaml-str] | `docker.io/datawire` | -| `webhookAgentImage` | The container image that the [Traffic Manager](../cluster-config/#mutating-webhook) will pull from the `webhookRegistry` when installing the Traffic Agent in annotated pods *This value is only used if a new `traffic-manager` is deployed* | non-qualified Docker image name [string][yaml-str] | (unset) | - -#### Cloud -Values for `cloud` are listed below and their type varies, so please see the chart for the expected type for each config value. -These fields control how the client interacts with the Cloud service. - -| Field | Description | Type | Default | -|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------|---------| -| `skipLogin` | Whether the CLI should skip automatic login to Ambassador Cloud. If set to true, in order to perform personal intercepts you must have a [license key](../cluster-config/#air-gapped-cluster) installed in the cluster. | [bool][yaml-bool] | false | -| `refreshMessages` | How frequently the CLI should communicate with Ambassador Cloud to get new command messages, which also resets whether the message has been raised or not. 
You will see each message at most once within the duration given by this config | [duration][go-duration] [string][yaml-str] | 168h | -| `systemaHost` | The host used to communicate with Ambassador Cloud | [string][yaml-str] | app.getambassador.io | -| `systemaPort` | The port used with `systemaHost` to communicate with Ambassador Cloud | [string][yaml-str] | 443 | - -Telepresence attempts to auto-detect if the cluster is capable of -communication with Ambassador Cloud, but may still prompt you to log -in in cases where only the on-laptop client wishes to communicate with -Ambassador Cloud. If you want those auto-login points to be disabled -as well, or would like it to not attempt to communicate with -Ambassador Cloud at all (even for the auto-detection), then be sure to -set the `skipLogin` value to `true`. - -Reminder: To use personal intercepts, which normally require a login, -you must have a license key in your cluster and specify which -`agentImage` should be installed by also adding the following to your -`config.yml`: - -```yaml -images: - agentImage: / -``` - -#### Grpc -The `maxReceiveSize` determines how large a message that the workstation receives via gRPC can be. The default is 4Mi (determined by gRPC). All traffic to and from the cluster is tunneled via gRPC. - -The size is measured in bytes. You can express it as a plain integer or as a fixed-point number using E, G, M, or K. You can also use the power-of-two equivalents: Gi, Mi, Ki. For example, the following represent roughly the same value: -``` -128974848, 129e6, 129M, 123Mi -``` - -#### RESTful API server -The `telepresenceAPI` controls the behavior of Telepresence's RESTful API server that can be queried for additional information about ongoing intercepts. When present, and the `port` is set to a valid port number, it's propagated to the auto-installer so that application containers that can be intercepted gets the `TELEPRESENCE_API_PORT` environment set. The server can then be queried at `localhost:`. In addition, the `traffic-agent` and the `user-daemon` on the workstation that performs an intercept will start the server on that port. -If the `traffic-manager` is auto-installed, its webhook agent injector will be configured to add the `TELEPRESENCE_API_PORT` environment to the app container when the `traffic-agent` is injected. -See [RESTful API server](../restapi) for more info. - -#### Intercept -The `intercept` controls applies to how telepresence will intercept the communications to the intercepted service. - -The `defaultPort` controls which port is selected when no `--port` flag is given to the `telepresence intercept` command. The default value is "8080". - -The `appProtocolStrategy` is only relevant when using personal intercepts. This controls how telepresence selects the application protocol to use when intercepting a service that has no `service.ports.appProtocol` defined. Valid values are: - -| Value | Resulting action | -|--------------|--------------------------------------------------------------------------------------------------------| -| `http2Probe` | The telepresence traffic-agent will probe the intercepted container to check whether it supports http2 | -| `portName` | Telepresence will make an educated guess about the protocol based on the name of the service port | -| `http` | Telepresence will use http | -| `http2` | Telepresence will use http2 | - -When `portName` is used, Telepresence will determine the protocol by the name of the port: `[-suffix]`. 
The following protocols are recognized: - -| Protocol | Meaning | -|----------|---------------------------------------| -| `http` | Plaintext HTTP/1.1 traffic | -| `http2` | Plaintext HTTP/2 traffic | -| `https` | TLS Encrypted HTTP (1.1 or 2) traffic | -| `grpc` | Same as http2 | - -## Per-Cluster Configuration -Some configuration is not global to Telepresence and is actually specific to a cluster. Thus, we store that config information in your kubeconfig file, so that it is easier to maintain per-cluster configuration. - -### Values -The current per-cluster configuration supports `dns`, `alsoProxy`, and `manager` keys. -To add configuration, simply add a `telepresence.io` entry to the cluster in your kubeconfig like so: - -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - also-proxy: - manager: - name: example-cluster -``` -#### DNS -The fields for `dns` are: local-ip, remote-ip, exclude-suffixes, include-suffixes, and lookup-timeout. - -| Field | Description | Type | Default | -|--------------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------|-----------------------------------------------------------------------------| -| `local-ip` | The address of the local DNS server. This entry is only used on Linux systems that are not configured to use systemd-resolved. | IP address [string][yaml-str] | first `nameserver` mentioned in `/etc/resolv.conf` | -| `remote-ip` | The address of the cluster's DNS service. | IP address [string][yaml-str] | IP of the `kube-dns.kube-system` or the `dns-default.openshift-dns` service | -| `exclude-suffixes` | Suffixes for which the DNS resolver will always fail (or fallback in case of the overriding resolver) | [sequence][yaml-seq] of [strings][yaml-str] | `[".arpa", ".com", ".io", ".net", ".org", ".ru"]` | -| `include-suffixes` | Suffixes for which the DNS resolver will always attempt to do a lookup. Includes have higher priority than excludes. | [sequence][yaml-seq] of [strings][yaml-str] | `[]` | -| `lookup-timeout` | Maximum time to wait for a cluster side host lookup. | [duration][go-duration] [string][yaml-str] | 4 seconds | - -Here is an example kubeconfig: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - include-suffixes: - - .se - exclude-suffixes: - - .com - name: example-cluster -``` - - -#### AlsoProxy - -When using `also-proxy`, you provide a list of subnets after the key in your kubeconfig file to be added to the TUN device. -All connections to addresses that the subnet spans will be dispatched to the cluster - -Here is an example kubeconfig for the subnet `1.2.3.4/32`: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - also-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -#### NeverProxy - -When using `never-proxy` you provide a list of subnets after the key in your kubeconfig file. These will never be routed via the -TUN device, even if they fall within the subnets (pod or service) for the cluster. Instead, whatever route they have before -telepresence connects is the route they will keep. 
- -Here is an example kubeconfig for the subnet `1.2.3.4/32`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - never-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -##### Using AlsoProxy together with NeverProxy - -Never proxy and also proxy are implemented as routing rules, meaning that when the two conflict, regular routing routes apply. -Usually this means that the most specific route will win. - -So, for example, if an `also-proxy` subnet falls within a broader `never-proxy` subnet: - -```yaml -never-proxy: [10.0.0.0/16] -also-proxy: [10.0.5.0/24] -``` - -Then the specific `also-proxy` of `10.0.5.0/24` will be proxied by the TUN device, whereas the rest of `10.0.0.0/16` will not. - -Conversely if a `never-proxy` subnet is inside a larger `also-proxy` subnet: - -```yaml -also-proxy: [10.0.0.0/16] -never-proxy: [10.0.5.0/24] -``` - -Then all of the also-proxy of `10.0.0.0/16` will be proxied, with the exception of the specific `never-proxy` of `10.0.5.0/24` - -#### Manager - -The `manager` key contains configuration for finding the `traffic-manager` that telepresence will connect to. It supports one key, `namespace`, indicating the namespace where the traffic manager is to be found - -Here is an example kubeconfig that will instruct telepresence to connect to a manager in namespace `staging`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` - -[yaml-bool]: https://yaml.org/type/bool.html -[yaml-float]: https://yaml.org/type/float.html -[yaml-int]: https://yaml.org/type/int.html -[yaml-seq]: https://yaml.org/type/seq.html -[yaml-str]: https://yaml.org/type/str.html -[go-duration]: https://pkg.go.dev/time#ParseDuration -[logrus-level]: https://github.com/sirupsen/logrus/blob/v1.8.1/logrus.go#L25-L45 diff --git a/docs/v2.4/reference/dns.md b/docs/v2.4/reference/dns.md deleted file mode 100644 index e38fbc61..00000000 --- a/docs/v2.4/reference/dns.md +++ /dev/null @@ -1,75 +0,0 @@ -# DNS resolution - -The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces of currently active intercepts. Processes running locally on the desktop will have network access to all services in the such namespaces by service-name only. - -All intercepts contribute to the DNS resolver, even those that do not use the `--namespace=` option. This is because `--namespace default` is implied, and in this context, `default` is treated just like any other namespace. - -No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `` only. Without an active intercept, the namespace qualified DNS name must be used (in the form `.`). - -See this demonstrated below, using the [quick start's](../../quick-start/) sample app services. - -No intercepts are currently running, we'll connect to the cluster and list the services that can be intercepted. - -``` -$ telepresence connect - - Connecting to traffic manager... 
- Connected to context default (https://) - -$ telepresence list - - web-app-5d568ccc6b : ready to intercept (traffic-agent not yet installed) - emoji : ready to intercept (traffic-agent not yet installed) - web : ready to intercept (traffic-agent not yet installed) - web-app-5d568ccc6b : ready to intercept (traffic-agent not yet installed) - -$ curl web-app:80 - - curl: (6) Could not resolve host: web-app - -``` - -This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace. - -``` -$ curl web-app.emojivoto:80 - - - - - - Emoji Vote - ... -``` - -Using the namespaced qualified DNS name though does work. -Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified. - -``` -$ telepresence intercept web --port 8080 - - Using Deployment web - intercepted - Intercept name : web - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Volume Mount Point: /tmp/telfs-166119801 - Intercepting : HTTP requests that match all headers: - 'x-telepresence-intercept-id: 8eac04e3-bf24-4d62-b3ba-35297c16f5cd:web' - -$ curl webapp:80 - - - - - - Emoji Vote - ... -``` - -Now curling that service by its short name works and will as long as the intercept is active. - -The DNS resolver will always be able to resolve services using `.` regardless of intercepts. - -See [Outbound connectivity](../routing/#dns-resolution) for details on DNS lookups. diff --git a/docs/v2.4/reference/docker-run.md b/docs/v2.4/reference/docker-run.md deleted file mode 100644 index 2262f0a5..00000000 --- a/docs/v2.4/reference/docker-run.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -Description: "How a Telepresence intercept can run a Docker container with configured environment and volume mounts." ---- - -# Using Docker for intercepts - -If you want your intercept to go to a Docker container on your laptop, use the `--docker-run` option. It creates the intercept, runs your container in the foreground, then automatically ends the intercept when the container exits. - -`telepresence intercept --port --docker-run -- ` - -The `--` separates flags intended for `telepresence intercept` from flags intended for `docker run`. - -## Example - -Imagine you are working on a new version of a your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container. - -`telepresence intercept frontend-v1 --port 8000 --docker-run -- frontend-v2` - -## Ports - -The `--port` flag can specify an additional port when `--docker-run` is used so that the local and container port can be different. This is done using `--port :`. The container port will default to the local port when using the `--port ` syntax. - -## Flags - -Telepresence will automatically pass some relevant flags to Docker in order to connect the container with the intercept. Those flags are combined with the arguments given after `--` on the command line. 
- -- `--dns-search tel2-search` Enables single label name lookups in intercepted namespaces -- `--env-file ` Loads the intercepted environment -- `--name intercept--` Names the Docker container, this flag is omitted if explicitly given on the command line -- `-p ` The local port for the intercept and the container port -- `-v ` Volume mount specification, see CLI help for `--mount` and `--docker-mount` flags for more info diff --git a/docs/v2.4/reference/environment.md b/docs/v2.4/reference/environment.md deleted file mode 100644 index 7f83ff11..00000000 --- a/docs/v2.4/reference/environment.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: "How Telepresence can import environment variables from your Kubernetes cluster to use with code running on your laptop." ---- - -# Environment variables - -Telepresence can import environment variables from the cluster pod when running an intercept. -You can then use these variables with the code running on your laptop of the service being intercepted. - -There are three options available to do this: - -1. `telepresence intercept [service] --port [port] --env-file=FILENAME` - - This will write the environment variables to a Docker Compose `.env` file. This file can be used with `docker-compose` when starting containers locally. Please see the Docker documentation regarding the [file syntax](https://docs.docker.com/compose/env-file/) and [usage](https://docs.docker.com/compose/environment-variables/) for more information. - -2. `telepresence intercept [service] --port [port] --env-json=FILENAME` - - This will write the environment variables to a JSON file. This file can be injected into other build processes. - -3. `telepresence intercept [service] --port [port] -- [COMMAND]` - - This will run a command locally with the pod's environment variables set on your laptop. Once the command quits the intercept is stopped (as if `telepresence leave [service]` was run). This can be used in conjunction with a local server command, such as `python [FILENAME]` or `node [FILENAME]` to run a service locally while using the environment variables that were set on the pod via a ConfigMap or other means. - - Another use would be running a subshell, Bash for example: - - `telepresence intercept [service] --port [port] -- /bin/bash` - - This would start the intercept then launch the subshell on your laptop with all the same variables set as on the pod. - -## Telepresence Environment Variables - -Telepresence adds some useful environment variables in addition to the ones imported from the intercepted pod: - -### TELEPRESENCE_ROOT -Directory where all remote volumes mounts are rooted. See [Volume Mounts](../volume/) for more info. - -### TELEPRESENCE_MOUNTS -Colon separated list of remotely mounted directories. - -### TELEPRESENCE_CONTAINER -The name of the intercepted container. Useful when a pod has several containers, and you want to know which one that was intercepted by Telepresence. - -### TELEPRESENCE_INTERCEPT_ID -ID of the intercept (same as the "x-intercept-id" http header). - -Useful if you need special behavior when intercepting a pod. One example might be when dealing with pub/sub systems like Kafka, where all processes that don't have the `TELEPRESENCE_INTERCEPT_ID` set can filter out all messages that contain an `x-intercept-id` header, while those that do, instead filter based on a matching `x-intercept-id` header. This is to assure that messages belonging to a certain intercept always are consumed by the intercepting process. 
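As a concrete sketch of the third option, this intercepts a service and runs a local server with the pod's environment variables set; the service name, port, and filename are placeholders, and the intercept ends when the command exits:

```console
$ telepresence intercept hello --port 8000 -- python3 app.py
```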
diff --git a/docs/v2.4/reference/inside-container.md b/docs/v2.4/reference/inside-container.md deleted file mode 100644 index 5f1dd2bd..00000000 --- a/docs/v2.4/reference/inside-container.md +++ /dev/null @@ -1,37 +0,0 @@ -# Running Telepresence inside a container - -It is sometimes desirable to run Telepresence inside a container. One reason can be to avoid any side effects on the workstation's network, another can be to establish multiple sessions with the traffic manager, or even work with different clusters simultaneously. - -## Building the container - -Building a container with a ready-to-run Telepresence is easy because there are relatively few external dependencies. Add the following to a `Dockerfile`: - -```Dockerfile -# Dockerfile with telepresence and its prerequisites -FROM alpine:3.13 - -# Install Telepresence prerequisites -RUN apk add --no-cache curl iproute2 sshfs - -# Download and install the telepresence binary -RUN curl -fL https://app.getambassador.io/download/tel2/linux/amd64/2.4.11/telepresence -o telepresence && \ - install -o root -g root -m 0755 telepresence /usr/local/bin/telepresence -``` -In order to build the container, do this in the same directory as the `Dockerfile`: -``` -$ docker build -t tp-in-docker . -``` - -## Running the container - -Telepresence will need access to the `/dev/net/tun` device on your Linux host (or, in case the host isn't Linux, the Linux VM that Docker starts automatically), and a Kubernetes config that identifies the cluster. It will also need `--cap-add=NET_ADMIN` to create its Virtual Network Interface. - -The command to run the container can look like this: -```bash -$ docker run \ - --cap-add=NET_ADMIN \ - --device /dev/net/tun:/dev/net/tun \ - --network=host \ - -v ~/.kube/config:/root/.kube/config \ - -it --rm tp-in-docker -``` diff --git a/docs/v2.4/reference/intercepts/index.md b/docs/v2.4/reference/intercepts/index.md deleted file mode 100644 index bd9c5bdc..00000000 --- a/docs/v2.4/reference/intercepts/index.md +++ /dev/null @@ -1,366 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Intercepts - -When intercepting a service, Telepresence installs a *traffic-agent* -sidecar in to the workload. That traffic-agent supports one or more -intercept *mechanisms* that it uses to decide which traffic to -intercept. Telepresence has a simple default traffic-agent, however -you can configure a different traffic-agent with more sophisticated -mechanisms either by setting the [`images.agentImage` field in -`config.yml`](../config/#images) or by writing an -[`extensions/${extension}.yml`][extensions] file that tells -Telepresence about a traffic-agent that it can use, what mechanisms -that traffic-agent supports, and command-line flags to expose to the -user to configure that mechanism. You may tell Telepresence which -known mechanism to use with the `--mechanism=${mechanism}` flag or by -setting one of the `--${mechansim}-XXX` flags, which implicitly set -the mechanism; for example, setting `--http-match=auto` implicitly -sets `--mechanism=http`. - -The default open-source traffic-agent only supports the `tcp` -mechanism, which treats the raw layer 4 TCP streams as opaque and -sends all of that traffic down to the developer's workstation. This -means that it is a "global" intercept, affecting all users of the -cluster. - -In addition to the default open-source traffic-agent, Telepresence -already knows about the Ambassador Cloud -[traffic-agent][ambassador-agent], which supports the `http` -mechanism. 
The `http` mechanism operates at higher layer, working -with layer 7 HTTP, and may intercept specific HTTP requests, allowing -other HTTP requests through to the regular service. This allows for -"personal" intercepts which only intercept traffic tagged as belonging -to a given developer. - -[extensions]: https://pkg.go.dev/github.com/telepresenceio/telepresence/v2@v$version$/pkg/client/cli/extensions -[ambassador-agent]: https://github.com/telepresenceio/telepresence/blob/release/v2/pkg/client/cli/extensions/builtin.go#L30-L50 - -## Intercept behavior when logged in to Ambassador Cloud - -Logging in to Ambassador Cloud (with [`telepresence -login`](../client/login/)) changes the Telepresence defaults in two -ways. - -First, being logged in to Ambassador Cloud causes Telepresence to -default to `--mechanism=http --http-match=auto` (or just -`--http-match=auto`, as `--http-match` implies `--mechanism=http`). -If you hadn't been logged in it would have defaulted to -`--mechanism=tcp`. This tells Telepresence to use the Ambassador -Cloud traffic-agent to do smart "personal" intercepts and only -intercept a subset of HTTP requests, rather than just intercepting the -entirety of all TCP connections. This is important for working in a -shared cluster with teammates, and is important for the preview URL -functionality below. See `telepresence intercept --help` for -information on using `--http-match` to customize which requests it -intercepts. - -Secondly, being logged in causes Telepresence to default to -`--preview-url=true`. If you hadn't been logged in it would have -defaulted to `--preview-url=false`. This tells Telepresence to take -advantage of Ambassador Cloud to create a preview URL for this -intercept, creating a shareable URL that automatically sets the -appropriate headers to have requests coming from the preview URL be -intercepted. In order to create the preview URL, it will prompt you -for four settings about how your cluster's ingress is configured. For -each, Telepresence tries to intelligently detect the correct value for -your cluster; if it detects it correctly, may simply press "enter" and -accept the default, otherwise you must tell Telepresence the correct -value. - -When you create an intercept with the `http` mechanism, Telepresence -determines whether the application protocol uses HTTP/1.1 or HTTP/2. If the -service's `ports.appProtocol` field is set, Telepresence uses that. If not, -then Telepresence uses the configured application protocol strategy to determine -the protocol. The default behavior (`http2Probe` strategy) sends a -`GET /telepresence-http2-check` request to your service to determine if it supports -HTTP/2. This is required for the intercepts to behave correctly. - -### TLS - -If the intercepted service has been set up for `--mechanism=http`, Telepresence -needs to terminate the TLS connection for the `http` mechanism to function in your -intercepts. Additionally, you need to ensure the -[TLS annotations](../cluster-config/#tls) are properly entered in your workload’s -Pod template to designate that requests leaving your service still speak TLS -outside of the service as expected. - -Use the `--http-plaintext` flag when doing an intercept when the service in the -cluster is using TLS in case you want to use plaintext for the communication with the -process on your local workstation. - -## Supported workloads - -Kubernetes has various -[workloads](https://kubernetes.io/docs/concepts/workloads/). 
-Currently, Telepresence supports intercepting (installing a -traffic-agent on) `Deployments`, `ReplicaSets`, and `StatefulSets`. - - - -While many of our examples use Deployments, they would also work on -ReplicaSets and StatefulSets - - - -## Specifying a namespace for an intercept - -The namespace of the intercepted workload is specified using the -`--namespace` option. When this option is used, and `--workload` is -not used, then the given name is interpreted as the name of the -workload and the name of the intercept will be constructed from that -name and the namespace. - -```shell -telepresence intercept hello --namespace myns --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept -`hello-myns`. In order to remove the intercept, you will need to run -`telepresence leave hello-mydns` instead of just `telepresence leave -hello`. - -The name of the intercept will be left unchanged if the workload is specified. - -```shell -telepresence intercept myhello --namespace myns --workload hello --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept `myhello`. - -## Importing environment variables - -Telepresence can import the environment variables from the pod that is -being intercepted, see [this doc](../environment/) for more details. - -## Creating an intercept without a preview URL - -If you *are not* logged in to Ambassador Cloud, the following command -will intercept all traffic bound to the service and proxy it to your -laptop. This includes traffic coming through your ingress controller, -so use this option carefully as to not disrupt production -environments. - -```shell -telepresence intercept --port= -``` - -If you *are* logged in to Ambassador Cloud, setting the -`--preview-url` flag to `false` is necessary. - -```shell -telepresence intercept --port= --preview-url=false -``` - -This will output an HTTP header that you can set on your request for -that traffic to be intercepted: - -```console -$ telepresence intercept --port= --preview-url=false -Using Deployment -intercepted - Intercept name: - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -```console -$ telepresence status -Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" -User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 1 total - dataprocessingnodeservice: @ -``` - -Finally, run `telepresence leave ` to stop the intercept. - -## Skipping the ingress dialogue - -You can skip the ingress dialogue by setting the relevant parameters using flags. If any of the following flags are set, the dialogue will be skipped and the flag values will be used instead. If any of the required flags are missing, an error will be thrown. 
- -| Flag | Description | Required | -| -------------- | ------------------------------ | --- | -| `--ingress-host` | The ip address for the ingress | yes | -| `--ingress-port` | The port for the ingress | yes | -| `--ingress-tls` | Whether tls should be used | no | -| `--ingress-l5` | Whether a different ip address should be used in request headers | no | - -## Creating an intercept when a service has multiple ports - -If you are trying to intercept a service that has multiple ports, you -need to tell Telepresence which service port you are trying to -intercept. To specify, you can either use the name of the service -port or the port number itself. To see which options might be -available to you and your service, use kubectl to describe your -service or look in the object's YAML. For more information on multiple -ports, see the [Kubernetes documentation][kube-multi-port-services]. - -[kube-multi-port-services]: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - -```console -$ telepresence intercept --port=: -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -When intercepting a service that has multiple ports, the name of the -service port that has been intercepted is also listed. - -If you want to change which port has been intercepted, you can create -a new intercept the same way you did above and it will change which -service port is being intercepted. - -## Creating an intercept When multiple services match your workload - -Oftentimes, there's a 1-to-1 relationship between a service and a -workload, so telepresence is able to auto-detect which service it -should intercept based on the workload you are trying to intercept. -But if you use something like -[Argo](https://www.getambassador.io/docs/argo/latest/quick-start/), there may be -two services (that use the same labels) to manage traffic between a -canary and a stable service. - -Fortunately, if you know which service you want to use when -intercepting a workload, you can use the `--service` flag. So in the -aforementioned example, if you wanted to use the `echo-stable` service -when intercepting your workload, your command would look like this: - -```console -$ telepresence intercept echo-rollout- --port --service echo-stable -Using ReplicaSet echo-rollout- -intercepted - Intercept name : echo-rollout- - State : ACTIVE - Workload kind : ReplicaSet - Destination : 127.0.0.1:3000 - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 - Intercepting : all TCP connections -``` - -## Port-forwarding an intercepted container's sidecars - -Sidecars are containers that sit in the same pod as an application -container; they usually provide auxiliary functionality to an -application, and can usually be reached at -`localhost:${SIDECAR_PORT}`. For example, a common use case for a -sidecar is to proxy requests to a database, your application would -connect to `localhost:${SIDECAR_PORT}`, and the sidecar would then -connect to the database, perhaps augmenting the connection with TLS or -authentication. - -When intercepting a container that uses sidecars, you might want those -sidecars' ports to be available to your local application at -`localhost:${SIDECAR_PORT}`, exactly as they would be if running -in-cluster. Telepresence's `--to-pod ${PORT}` flag implements this -behavior, adding port-forwards for the port given. 
- -```console -$ telepresence intercept --port=: --to-pod= -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -If there are multiple ports that you need forwarded, simply repeat the -flag (`--to-pod= --to-pod=`). - -## Intercepting headless services - -Kubernetes supports creating [services without a ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services), -which, when they have a pod selector, serve to provide a DNS record that will directly point to the service's backing pods. -Telepresence supports intercepting these `headless` services as it would a regular service with a ClusterIP. -So, for example, if you have the following service: - -```yaml ---- -apiVersion: v1 -kind: Service -metadata: - name: my-headless -spec: - type: ClusterIP - clusterIP: None - selector: - service: my-headless - ports: - - port: 8080 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: my-headless - labels: - service: my-headless -spec: - replicas: 1 - serviceName: my-headless - selector: - matchLabels: - service: my-headless - template: - metadata: - labels: - service: my-headless - spec: - containers: - - name: my-headless - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -``` - -You can intercept it like any other: - -```console -$ telepresence intercept my-headless --port 8080 -Using StatefulSet my-headless -intercepted - Intercept name : my-headless - State : ACTIVE - Workload kind : StatefulSet - Destination : 127.0.0.1:8080 - Volume Mount Point: /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-524189712 - Intercepting : all TCP connections -``` - - -This utilizes an initContainer that requires `NET_ADMIN` capabilities. -If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. - - - -This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. -To enable running as GID 7777 on a specific openshift namespace, run: -oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE - - - -Intercepting headless services without a selector is not supported. - diff --git a/docs/v2.4/reference/intercepts/manual-agent.md b/docs/v2.4/reference/intercepts/manual-agent.md deleted file mode 100644 index e818171c..00000000 --- a/docs/v2.4/reference/intercepts/manual-agent.md +++ /dev/null @@ -1,221 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Manually injecting the Traffic Agent - -You can directly modify your workload's YAML configuration to add the Telepresence Traffic Agent and enable it to be intercepted. - -When you use a Telepresence intercept, Telepresence automatically edits the workload and services when you use -`telepresence uninstall --agent `. In some GitOps workflows, you may need to use the -[Telepresence Mutating Webhook](../../cluster-config/#mutating-webhook) to keep intercepted workloads unmodified -while you target changes on specific pods. - - -In situations where you don't have access to the proper permissions for numeric ports, as noted in the Note on numeric ports -section of the documentation, it is possible to manually inject the Traffic Agent. Because this is not the recommended approach -to making a workload interceptable, try the Mutating Webhook before proceeding." 
- - -## Procedure - -You can manually inject the agent into Deployments, StatefulSets, or ReplicaSets. The example on this page -uses the following Deployment: - - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "my-service" - labels: - service: my-service -spec: - replicas: 1 - selector: - matchLabels: - service: my-service - template: - metadata: - labels: - service: my-service - spec: - containers: - - name: echo-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -``` - -The deployment is being exposed by the following service: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: "my-service" -spec: - type: ClusterIP - selector: - service: my-service - ports: - - port: 80 - targetPort: 8080 -``` - -### 1. Generating the YAML - -First, generate the YAML for the traffic-agent container: - -```console -$ telepresence genyaml container --container-name echo-container --port 8080 --output - --input deployment.yaml -args: -- agent -env: -- name: TELEPRESENCE_CONTAINER - value: echo-container -- name: _TEL_AGENT_LOG_LEVEL - value: info -- name: _TEL_AGENT_NAME - value: my-service -- name: _TEL_AGENT_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -- name: _TEL_AGENT_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP -- name: _TEL_AGENT_APP_PORT - value: "8080" -- name: _TEL_AGENT_AGENT_PORT - value: "9900" -- name: _TEL_AGENT_MANAGER_HOST - value: traffic-manager.ambassador -image: docker.io/datawire/tel2:2.4.6 -name: traffic-agent -ports: -- containerPort: 9900 - protocol: TCP -readinessProbe: - exec: - command: - - /bin/stat - - /tmp/agent/ready -resources: {} -volumeMounts: -- mountPath: /tel_pod_info - name: traffic-annotations -``` - -Next, generate the YAML for the volume: - -```console -$ telepresence genyaml volume --output - --input deployment.yaml -downwardAPI: - items: - - fieldRef: - fieldPath: metadata.annotations - path: annotations -name: traffic-annotations -``` - - -Enter `telepresence genyaml container --help` or `telepresence genyaml volume --help` for more information about these flags. - - -### 2. Injecting the YAML into the Deployment - -You need to add the `Deployment` YAML you genereated to include the container and the volume. These are placed as elements of `spec.template.spec.containers` and `spec.template.spec.volumes` respectively. -You also need to modify `spec.template.metadata.annotations` and add the annotation `telepresence.getambassador.io/manually-injected: "true"`. 
-These changes should look like the following: - -```diff -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "my-service" - labels: - service: my-service -spec: - replicas: 1 - selector: - matchLabels: - service: my-service - template: - metadata: - labels: - service: my-service -+ annotations: -+ telepresence.getambassador.io/manually-injected: "true" - spec: - containers: - - name: echo-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -+ - args: -+ - agent -+ env: -+ - name: TELEPRESENCE_CONTAINER -+ value: echo-container -+ - name: _TEL_AGENT_LOG_LEVEL -+ value: info -+ - name: _TEL_AGENT_NAME -+ value: my-service -+ - name: _TEL_AGENT_NAMESPACE -+ valueFrom: -+ fieldRef: -+ fieldPath: metadata.namespace -+ - name: _TEL_AGENT_POD_IP -+ valueFrom: -+ fieldRef: -+ fieldPath: status.podIP -+ - name: _TEL_AGENT_APP_PORT -+ value: "8080" -+ - name: _TEL_AGENT_AGENT_PORT -+ value: "9900" -+ - name: _TEL_AGENT_MANAGER_HOST -+ value: traffic-manager.ambassador -+ image: docker.io/datawire/tel2:2.4.6 -+ name: traffic-agent -+ ports: -+ - containerPort: 9900 -+ protocol: TCP -+ readinessProbe: -+ exec: -+ command: -+ - /bin/stat -+ - /tmp/agent/ready -+ resources: {} -+ volumeMounts: -+ - mountPath: /tel_pod_info -+ name: traffic-annotations -+ volumes: -+ - downwardAPI: -+ items: -+ - fieldRef: -+ fieldPath: metadata.annotations -+ path: annotations -+ name: traffic-annotations -``` - -### 3. Modifying the service - -Once the modified deployment YAML has been applied to the cluster, you need to modify the Service to route traffic to the Traffic Agent. -You can do this by changing the exposed `targetPort` to `9900`. The resulting service should look like: - -```diff -apiVersion: v1 -kind: Service -metadata: - name: "my-service" -spec: - type: ClusterIP - selector: - service: my-service - ports: - - port: 80 -- targetPort: 8080 -+ targetPort: 9900 -``` diff --git a/docs/v2.4/reference/linkerd.md b/docs/v2.4/reference/linkerd.md deleted file mode 100644 index 9b903fa7..00000000 --- a/docs/v2.4/reference/linkerd.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -Description: "How to get Linkerd meshed services working with Telepresence" ---- - -# Using Telepresence with Linkerd - -## Introduction -Getting started with Telepresence on Linkerd services is as simple as adding an annotation to your Deployment: - -```yaml -spec: - template: - metadata: - annotations: - config.linkerd.io/skip-outbound-ports: "8081" -``` - -The local system and the Traffic Agent connect to the Traffic Manager using its gRPC API on port 8081. Telling Linkerd to skip that port allows the Traffic Agent sidecar to fully communicate with the Traffic Manager, and therefore the rest of the Telepresence system. - -## Prerequisites -1. [Telepresence binary](../../install) -2. Linkerd control plane [installed to cluster](https://linkerd.io/2.10/tasks/install/) -3. Kubectl -4. [Working ingress controller](https://www.getambassador.io/docs/edge-stack/latest/howtos/linkerd2) - -## Deploy -Save and deploy the following YAML. Note the `config.linkerd.io/skip-outbound-ports` annotation in the metadata of the pod template. 
- -```yaml ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: quote -spec: - replicas: 1 - selector: - matchLabels: - app: quote - strategy: - type: RollingUpdate - template: - metadata: - annotations: - linkerd.io/inject: "enabled" - config.linkerd.io/skip-outbound-ports: "8081,8022,6001" - labels: - app: quote - spec: - containers: - - name: backend - image: docker.io/datawire/quote:0.4.1 - ports: - - name: http - containerPort: 8000 - env: - - name: PORT - value: "8000" - resources: - limits: - cpu: "0.1" - memory: 100Mi -``` - -## Connect to Telepresence -Run `telepresence connect` to connect to the cluster. Then `telepresence list` should show the `quote` deployment as `ready to intercept`: - -``` -$ telepresence list - - quote: ready to intercept (traffic-agent not yet installed) -``` - -## Run the intercept -Run `telepresence intercept quote --port 8080:80` to direct traffic from the `quote` deployment to port 8080 on your local system. Assuming you have something listening on 8080, you should now be able to see your local service whenever attempting to access the `quote` service. diff --git a/docs/v2.4/reference/restapi.md b/docs/v2.4/reference/restapi.md deleted file mode 100644 index e3934abd..00000000 --- a/docs/v2.4/reference/restapi.md +++ /dev/null @@ -1,117 +0,0 @@ -# Telepresence RESTful API server - -Telepresence can run a RESTful API server on the local host, both on the local workstation and in a pod that contains a `traffic-agent`. The server currently has two endpoints. The standard `healthz` endpoint and the `consume-here` endpoint. - -## Enabling the server -The server is enabled by setting the `telepresenceAPI.port` to a valid port number in the [Telepresence Helm Chart](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). The values may be passed explicitly to Helm during install, or configured using the [Telepresence Config](../config#restful-api-server) to impact an auto-install. - -## Querying the server -On the cluster's side, it's the `traffic-agent` of potentially intercepted pods that runs the server. The server can be accessed using `http://localhost:/` from the application container. Telepresence ensures that the container has the `TELEPRESENCE_API_PORT` environment variable set when the `traffic-agent` is installed. On the workstation, it is the `user-daemon` that runs the server. It uses the `TELEPRESENCE_API_PORT` that is conveyed in the environment of the intercept. This means that the server can be accessed the exact same way locally, provided that the environment is propagated correctly to the interceptor process. - -## Endpoints - -### healthz -The `http://localhost:/healthz` endpoint should respond with status code 200 OK. If it doesn't then something isn't configured correctly. Check that the `traffic-agent` container is present and that the `TELEPRESENCE_API_PORT` has been added to the environment of the application container and/or in the environment that is propagated to the interceptor that runs on the local workstation. - -#### test endpoint using curl -A `curl -v` call can be used to test the endpoint when an intercept is active. This example assumes that the API port is configured to be 9980. -```console -$ curl -v localhost:9980/healthz -* Trying ::1:9980... 
-* Connected to localhost (::1) port 9980 (#0) -> GET /healthz HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.76.1 -> Accept: */* -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Date: Fri, 26 Nov 2021 07:06:18 GMT -< Content-Length: 0 -< -* Connection #0 to host localhost left intact -``` - -### consume-here -`http://localhost:/consume-here` is intended to be queried with a set of headers, typically obtained from a Kafka message or similar, and will respond with "true" (consume the message) or "false" (leave the message on the queue). When running in the cluster, this endpoint will respond with `false` if the headers match an ongoing intercept for the same workload, because it's assumed that it's up to the intercept to consume the message. When running locally, the response is inverted: matching headers mean that the message should be consumed. - -Telepresence provides the ID of the intercept in the environment variable [TELEPRESENCE_INTERCEPT_ID](../environment/#telepresence_intercept_id) during an intercept. This ID must be provided in a `x-telepresence-caller-intercept-id: = ` header. Telepresence needs this to identify the caller correctly. The `` will be empty when running in the cluster, but it's harmless to provide it there too, so there's no need for conditional code. - -#### test endpoint using curl -There are three prerequisites to fulfill before testing this endpoint using `curl -v` on the workstation. -1. An intercept must be active -2. The "/healthz" endpoint must respond with OK -3. The ID of the intercept must be known. It will be visible as `x-telepresence-intercept-id` in the output of the `telepresence intercept` and `telepresence list` commands unless the intercept was started with `--http-match` flags. If it was, the `--env-file ` or `--env-json ` flag must also be used so that the environment can be examined. The variable to look for in the file is `TELEPRESENCE_INTERCEPT_ID`. - -Assuming that the API server runs on port 9980 and that the intercept was started with `-H 'foo: bar'`, we can now check that the "/consume-here" endpoint returns "true" for the given headers. -```console -$ curl -v localhost:9980/consume-here -H 'x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest' -H 'foo: bar' -* Trying ::1:9980... -* Connected to localhost (::1) port 9980 (#0) -> GET /consume-here HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.76.1 -> Accept: */* -> x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest -> foo: bar -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Content-Type: text/plain -< Date: Fri, 26 Nov 2021 06:43:28 GMT -< Content-Length: 4 -< -* Connection #0 to host localhost left intact -true% -``` - -If you can run curl from the pod, you can try the exact same URL. The result should be "false" when there's an ongoing intercept. The `x-telepresence-caller-intercept-id` is not needed when the call is made from the pod. -#### Example code: - -Here's an example filter written in Go.
It divides the actual URL creation (only needs to run once) from the filter function to make the filter more performant: -```go -const portEnv = "TELEPRESENCE_API_PORT" -const interceptIdEnv = "TELEPRESENCE_INTERCEPT_ID" - -// apiURL creates the generic URL needed to access the service -func apiURL() (string, error) { - pe := os.Getenv(portEnv) - if _, err := strconv.ParseUint(pe, 10, 16); err != nil { - return "", fmt.Errorf("value %q of env %s does not represent a valid port number", pe, portEnv) - } - return "http://localhost:" + pe, nil -} - -// consumeHereURL creates the URL for the "consume-here" endpoint -func consumeHereURL() (string, error) { - apiURL, err := apiURL() - if err != nil { - return "", err - } - return apiURL + "/consume-here", nil -} - -// consumeHere expects an url created using consumeHereURL() and calls the endpoint with the given -// headers and returns the result -func consumeHere(url string, hm map[string]string) (bool, error) { - rq, err := http.NewRequest("GET", url, nil) - if err != nil { - return false, err - } - rq.Header = make(http.Header, len(hm)+1) - rq.Header.Set("X-Telepresence-Caller-Intercept-Id", os.Getenv(interceptIdEnv)) - for k, v := range hm { - rq.Header.Set(k, v) - } - rs, err := http.DefaultClient.Do(rq) - if err != nil { - return false, err - } - defer rs.Body.Close() - b, err := io.ReadAll(rs.Body) - if err != nil { - return false, err - } - return strconv.ParseBool(string(b)) -} -``` \ No newline at end of file diff --git a/docs/v2.4/reference/routing.md b/docs/v2.4/reference/routing.md deleted file mode 100644 index 061ba8fa..00000000 --- a/docs/v2.4/reference/routing.md +++ /dev/null @@ -1,69 +0,0 @@ -# Connection Routing - -## Outbound - -### DNS resolution -When requesting a connection to a host, the IP of that host must be determined. Telepresence provides DNS resolvers to help with this task. There are currently four types of resolvers but only one of them will be used on a workstation at any given time. Common for all of them is that they will propagate a selection of the host lookups to be performed in the cluster. The selection normally includes all names ending with `.cluster.local` or a currently mapped namespace but more entries can be added to the list using the `include-suffixes` option in the -[local DNS configuration](../config/#dns) - -#### Cluster side DNS lookups -The cluster side host lookup will be performed by the traffic-manager unless the client has an active intercept, in which case, the agent performing that intercept will be responsible for doing it. If the client has multiple intercepts, then all of them will be asked to perform the lookup, and the response to the client will contain the unique sum of IPs that they produce. It's therefore important to never have multiple intercepts that span more than one namespace[[1](#namespacelimit)]. The reason for asking all of them is that the workstation currently impersonates multiple containers, and it is not possible to determine on behalf of what container the lookup request is made. - -#### macOS resolver -This resolver hooks into the macOS DNS system by creating files under `/etc/resolver`. Those files correspond to some domain and contain the port number of the Telepresence resolver. Telepresence creates one such file for each of the currently mapped namespaces and `include-suffixes` option. The file `telepresence.local` contains a search path that is configured based on current intercepts so that single label names can be resolved correctly. 
- -#### Linux systemd-resolved resolver -This resolver registers itself as part of telepresence's [VIF](../tun-device) using `systemd-resolved` and uses the DBus API to configure domains and routes that corresponds to the current set of intercepts and namespaces. - -#### Linux overriding resolver -Linux systems that aren't configured with `systemd-resolved` will use this resolver. A Typical case is when running Telepresence [inside a docker container](../inside-container). During initialization, the resolver will first establish a _fallback_ connection to the IP passed as `--dns`, the one configured as `local-ip` in the [local DNS configuration](../config/#dns), or the primary `nameserver` registered in `/etc/resolv.conf`. It will then use iptables to actually override that IP so that requests to it instead end up in the overriding resolver, which unless it succeeds on its own, will use the _fallback_. - -#### Windows resolver -This resolver uses the DNS resolution capabilities of the [win-tun](https://www.wintun.net/) device in conjunction with [Win32_NetworkAdapterConfiguration SetDNSDomain](https://docs.microsoft.com/en-us/powershell/scripting/samples/performing-networking-tasks?view=powershell-7.2#assigning-the-dns-domain-for-a-network-adapter). - -#### DNS caching -The Telepresence DNS resolver often changes its configuration. This means that Telepresence must either flush the DNS caches on the local host, or ensure that DNS-records returned from the Telepresence resolver aren't cached (or cached for a very short time). All operating systems have different ways of flushing the DNS caches and even different versions of one system may have differences. Also, on some systems it is necessary to actually kill and restart processes to ensure a proper flush, which in turn may result in network instabilities. - -Starting with 2.4.7, Telepresence will no longer flush the host's DNS caches. Instead, all records will have a short Time To Live (TTL) so that such caches evict the entries quickly. This causes increased load on the Telepresence resolver (shorter TTL means more frequent queries) and to cater for that, telepresence now has an internal cache to minimize the number of DNS queries that it sends to the cluster. This cache is flushed as needed without causing instabilities. - -### Routing - -#### Subnets -The Telepresence `traffic-manager` service is responsible for discovering the cluster's service subnet and all subnets used by the pods. In order to do this, it needs permission to create a dummy service[[2](#servicesubnet)] in its own namespace, and the ability to list, get, and watch nodes and pods. Most clusters will expose the pod subnets as `podCIDR` in the `Node` while others, like Amazon EKS, don't. Telepresence will then fall back to deriving the subnets from the IPs of all pods. If you'd like to choose a specific method for discovering subnets, or want to provide the list yourself, you can use the `podCIDRStrategy` configuration value in the [helm](../../install/helm) chart to do that. - -The complete set of subnets that the [VIF](../tun-device) will be configured with is dynamic and may change during a connection's life cycle as new nodes arrive or disappear from the cluster. The set consists of what that the traffic-manager finds in the cluster, and the subnets configured using the [also-proxy](../config#alsoproxy) configuration option. Telepresence will remove subnets that are equal to, or completely covered by, other subnets. 
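If you need to override the subnet discovery described above, the `podCIDRStrategy` value mentioned here is set on the traffic-manager's Helm chart. A sketch, assuming the standard `datawire` chart repository and the `ambassador` namespace used elsewhere in these docs; `<strategy>` is a placeholder for one of the values documented by the chart:

```console
$ helm repo add datawire https://app.getambassador.io
$ helm repo update
$ helm upgrade --install traffic-manager datawire/telepresence \
    --namespace ambassador \
    --set podCIDRStrategy=<strategy>
```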
- -#### Connection origin -A request to connect to an IP-address that belongs to one of the subnets of the [VIF](../tun-device) will cause a connection request to be made in the cluster. As with host name lookups, the request will originate from the traffic-manager unless the client has ongoing intercepts. If it does, one of the intercepted pods will be chosen, and the request will instead originate from that pod. This is a best-effort approach. Telepresence only knows that the request originated from the workstation. It cannot know that it is intended to originate from a specific pod when multiple intercepts are active. - -A `--local-only` intercept will not have any effect on the connection origin because there is no pod from which the connection can originate. The intercept must be made on a workload that has been deployed in the cluster if there's a requirement for correct connection origin. - -There are multiple reasons for doing this. One is that it is important that the request originates from the correct namespace. Example: - -```bash -curl some-host -``` -results in a http request with header `Host: some-host`. Now, if a service-mesh like Istio performs header based routing, then it will fail to find that host unless the request originates from the same namespace as the host resides in. Another reason is that the configuration of a service mesh can contain very strict rules. If the request then originates from the wrong pod, it will be denied. Only one intercept at a time can be used if there is a need to ensure that the chosen pod is exactly right. - -### Recursion detection -It is common that clusters used in development, such as Minikube, Minishift or k3s, run on the same host as the Telepresence client, often in a Docker container. Such clusters may have access to host network, which means that both DNS and L4 routing may be subjected to recursion. - -#### DNS recursion -When a local cluster's DNS-resolver fails to resolve a hostname, it may fall back to querying the local host network. This means that the Telepresence resolver will be asked to resolve a query that was issued from the cluster. Telepresence must check if such a query is recursive because there is a chance that it actually originated from the Telepresence DNS resolver and was dispatched to the `traffic-manager`, or a `traffic-agent`. - -Telepresence handles this by sending one initial DNS-query to resolve the hostname "tel2-recursion-check.kube-system". If the cluster runs locally, and has access to the local host's network, then that query will recurse back into the Telepresence resolver. Telepresence remembers this and alters its own behavior so that queries that are believed to be recursions are detected and respond with an NXNAME record. Telepresence performs this solution to the best of its ability, but may not be completely accurate in all situations. There's a chance that the DNS-resolver will yield a false negative for the second query if the same hostname is queried more than once in rapid succession, that is when the second query is made before the first query has received a response from the cluster. - -#### Connect recursion -A cluster running locally may dispatch connection attempts to non-existing host:port combinations to the host network. This means that they may reach the Telepresence [VIF](../tun-device). Endless recursions occur if the VIF simply dispatches such attempts on to the cluster. 
- -The telepresence client handles this by serializing all connection attempts to one specific IP:PORT, trapping all subsequent attempts to connect to that IP:PORT until the first attempt has completed. If the first attempt was deemed a success, then the currently trapped attempts are allowed to proceed. If the first attempt failed, then the currently trapped attempts fail. - -## Inbound - -The traffic-manager and traffic-agent are mutually responsible for setting up the necessary connection to the workstation when an intercept becomes active. In versions prior to 2.3.2, this would be accomplished by the traffic-manager creating a port dynamically that it would pass to the traffic-agent. The traffic-agent would then forward the intercepted connection to that port, and the traffic-manager would forward it to the workstation. This led to problems when integrating with service meshes like Istio since those dynamic ports needed to be configured. It also imposed an undesired requirement to be able to use mTLS between the traffic-manager and traffic-agent. - -In 2.3.2, this changed so that the traffic-agent instead creates a tunnel to the traffic-manager using the already existing gRPC API connection. The traffic-manager then forwards that using another tunnel to the workstation. This is completely invisible to other service meshes and is therefore much easier to configure. - -##### Footnotes: -

1: A future version of Telepresence will not allow concurrent intercepts that span multiple namespaces.

-

2: The error message from an attempt to create a service in a bad subnet contains the service subnet. The trick of creating a dummy service is currently the only way to get Kubernetes to expose that subnet.
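A quick end-to-end check of the outbound DNS and routing behavior described above is to resolve and call a cluster service directly from the workstation once connected. This is a sketch with hypothetical names (`my-service` in namespace `my-ns`, listening on port 8080):

```console
$ telepresence connect
$ nslookup my-service.my-ns.svc.cluster.local   # lookup is propagated to the cluster by the Telepresence resolver
$ curl -s my-service.my-ns:8080/                # the connection is routed through the VIF to the cluster
```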

diff --git a/docs/v2.4/reference/tun-device.md b/docs/v2.4/reference/tun-device.md deleted file mode 100644 index 4410f6f3..00000000 --- a/docs/v2.4/reference/tun-device.md +++ /dev/null @@ -1,27 +0,0 @@ -# Networking through Virtual Network Interface - -The Telepresence daemon process creates a Virtual Network Interface (VIF) when Telepresence connects to the cluster. The VIF ensures that the cluster's subnets are available to the workstation. It also intercepts DNS requests and forwards them to the traffic-manager which in turn forwards them to intercepted agents, if any, or performs a host lookup by itself. - -### TUN-Device -The VIF is a TUN-device, which means that it communicates with the workstation in terms of L3 IP-packets. The router will recognize UDP and TCP packets and tunnel their payload to the traffic-manager via its encrypted gRPC API. The traffic-manager will then establish corresponding connections in the cluster. All protocol negotiation takes place in the client because the VIF takes care of the L3 to L4 translation (i.e. the tunnel is L4, not L3). - -## Gains when using the VIF - -### Both TCP and UDP -The TUN-device is capable of routing both TCP and UDP for outbound traffic. Earlier versions of Telepresence would only allow TCP. Future enhancements might be to also route inbound UDP, and perhaps a selection of ICMP packages (to allow for things like `ping`). - -### No SSH required - -The VIF approach is somewhat similar to using `sshuttle` but without -any requirements for extra software, configuration or connections. -Using the VIF means that only one single connection needs to be -forwarded through the Kubernetes apiserver (à la `kubectl -port-forward`), using only one single port. There is no need for -`ssh` in the client nor for `sshd` in the traffic-manager. This also -means that the traffic-manager container can run as the default user. - -#### sshfs without ssh encryption -When a POD is intercepted, and its volumes are mounted on the local machine, this mount is performed by [sshfs](https://github.com/libfuse/sshfs). Telepresence will run `sshfs -o slave` which means that instead of using `ssh` to establish an encrypted communication to an `sshd`, which in turn terminates the encryption and forwards to `sftp`, the `sshfs` will talk `sftp` directly on its `stdin/stdout` pair. Telepresence tunnels that directly to an `sftp` in the agent using its already encrypted gRPC API. As a result, no `sshd` is needed in client nor in the traffic-agent, and the traffic-agent container can run as the default user. - -### No Firewall rules -With the VIF in place, there's no longer any need to tamper with firewalls in order to establish IP routes. The VIF makes the cluster subnets available during connect, and the kernel will perform the routing automatically. When the session ends, the kernel is also responsible for cleaning up. diff --git a/docs/v2.4/reference/vpn.md b/docs/v2.4/reference/vpn.md deleted file mode 100644 index 19c8508c..00000000 --- a/docs/v2.4/reference/vpn.md +++ /dev/null @@ -1,157 +0,0 @@ - -
- -# Telepresence and VPNs - -## The test-vpn command - -You can make use of the `telepresence test-vpn` command to diagnose issues -with your VPN setup. -This guides you through a series of steps to figure out if there are -conflicts between your VPN configuration and telepresence. - -### Prerequisites - -Before running `telepresence test-vpn` you should ensure that your VPN is -in split-tunnel mode. -This means that only traffic that _must_ pass through the VPN is directed -through it; otherwise, the test results may be inaccurate. - -You may need to configure this on both the client and server sides. -Client-side, taking the Tunnelblick client as an example, you must ensure that -the `Route all IPv4 traffic through the VPN` tickbox is not enabled: - - - -Server-side, taking AWS' ClientVPN as an example, you simply have to enable -split-tunnel mode: - - - -In AWS, this setting can be toggled without reprovisioning the VPN. Other cloud providers may work differently. - -### Testing the VPN configuration - -To run it, enter: - -```console -$ telepresence test-vpn -``` - -The test-vpn tool begins by asking you to disconnect from your VPN; ensure you are disconnected then -press enter: - -``` -Telepresence Root Daemon is already stopped -Telepresence User Daemon is already stopped -Please disconnect from your VPN now and hit enter once you're disconnected... -``` - -Once it's gathered information about your network configuration without an active connection, -it will ask you to connect to the VPN: - -``` -Please connect to your VPN now and hit enter once you're connected... -``` - -It will then connect to the cluster: - - -``` -Launching Telepresence Root Daemon -Launching Telepresence User Daemon -Connected to context arn:aws:eks:us-east-1:914373874199:cluster/josec-tp-test-vpn-cluster (https://07C63820C58A0426296DAEFC73AED10C.gr7.us-east-1.eks.amazonaws.com) -Telepresence Root Daemon quitting... done -Telepresence User Daemon quitting... done -``` - -And show you the results of the test: - -``` ----------- Test Results: -❌ pod subnet 10.0.0.0/19 is masking VPN-routed CIDR 10.0.0.0/16. This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/19 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 10.0.0.0/16 are placed in the never-proxy list -✅ svc subnet 10.19.0.0/16 is clear of VPN - -Please see https://www.telepresence.io/docs/v2.4/reference/vpn for more info on these corrective actions, as well as examples - -Still having issues? Please create a new github issue at https://github.com/telepresenceio/telepresence/issues/new?template=Bug_report.md - Please make sure to add the following to your issue: - * Run `telepresence loglevel debug`, try to connect, then run `telepresence gather_logs`. It will produce a zipfile that you should attach to the issue. - * Which VPN client are you using? - * Which VPN server are you using? - * How is your VPN pushing DNS configuration? It may be useful to add the contents of /etc/resolv.conf -``` - -#### Interpreting test results - -##### Case 1: VPN masked by cluster - -In an instance where the VPN is masked by the cluster, the test-vpn tool informs you that a pod or service subnet is masking a CIDR that the VPN -routes: - -``` -❌ pod subnet 10.0.0.0/19 is masking VPN-routed CIDR 10.0.0.0/16. 
This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/19 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 10.0.0.0/16 are placed in the never-proxy list -``` - -This means that all VPN hosts within `10.0.0.0/19` will be rendered inaccessible while -telepresence is connected. - -The ideal resolution in this case is to move the pods to a different subnet. This is possible, -for example, in Amazon EKS by configuring a [new CIDR range](https://aws.amazon.com/premiumsupport/knowledge-center/eks-multiple-cidr-ranges/) for the pods. -In this case, configuring the pods to be located in `10.1.0.0/19` clears the VPN and allows you -to reach hosts inside the VPC's `10.0.0.0/19` - -However, it is not always possible to move the pods to a different subnet. -In these cases, you should use the [never-proxy](../config#neverproxy) configuration to prevent certain -hosts from being masked. -This might be particularly important for DNS resolution. In an AWS ClientVPN VPN it is often -customary to set the `.2` host as a DNS server (e.g. `10.0.0.2` in this case): - - - -If this is the case for your VPN, you should place the DNS server in the never-proxy list for your -cluster. In your kubeconfig file, add a `telepresence` extension like so: - -```yaml -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - never-proxy: - - 10.0.0.2/32 -``` - -##### Case 2: Cluster masked by VPN - -In an instance where the Cluster is masked by the VPN, the test-vpn tool informs you that a pod or service subnet is being masked by a CIDR -that the VPN routes: - -``` -❌ pod subnet 10.0.0.0/8 being masked by VPN-routed CIDR 10.0.0.0/16. This usually means that Telepresence will not be able to connect to your cluster. To resolve: - * Move pod subnet 10.0.0.0/8 to a subnet not mapped by the VPN - * If this is not possible, consider shrinking the mask of the 10.0.0.0/16 CIDR (e.g. from /16 to /8), or disabling split-tunneling -``` - -Typically this means that pods within `10.0.0.0/8` are not accessible while the VPN is -connected. - -As with the first case, the ideal resolution is to move the pods away, but this may not always -be possible. In that case, your best bet is to attempt to shrink the VPN's CIDR -(that is, make it route more hosts) to make Telepresence's routes win by virtue of specificity. -One easy way to do this may be by disabling split tunneling (see the [prerequisites](#prerequisites) -section for more on split-tunneling). - -Note that once you fix this, you may find yourself landing again in [Case 1](#case-1), and may need -to use never-proxy rules to whitelist hosts in the VPN: - -``` -❌ pod subnet 10.0.0.0/8 is masking VPN-routed CIDR 0.0.0.0/1. This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/8 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 0.0.0.0/1 are placed in the never-proxy list -``` -
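To do the comparison from the test results by hand, you can list the routes your VPN installs and compare them with the pod CIDRs the cluster reports. A sketch, assuming a Linux workstation (macOS alternative shown) and nodes that expose `spec.podCIDR`; run the route commands with the VPN connected and telepresence disconnected:

```console
$ ip route show                                             # Linux: routes currently installed
$ netstat -rn -f inet                                       # macOS equivalent
$ kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}'  # pod CIDRs, when the nodes expose them
```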
diff --git a/docs/v2.4/release-notes/no-ssh.png b/docs/v2.4/release-notes/no-ssh.png deleted file mode 100644 index 025f20ab..00000000 Binary files a/docs/v2.4/release-notes/no-ssh.png and /dev/null differ diff --git a/docs/v2.4/release-notes/run-tp-in-docker.png b/docs/v2.4/release-notes/run-tp-in-docker.png deleted file mode 100644 index 53b66a9b..00000000 Binary files a/docs/v2.4/release-notes/run-tp-in-docker.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.2.png b/docs/v2.4/release-notes/telepresence-2.2.png deleted file mode 100644 index 43abc7e8..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.2.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.0-homebrew.png b/docs/v2.4/release-notes/telepresence-2.3.0-homebrew.png deleted file mode 100644 index e203a975..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.0-homebrew.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.0-loglevels.png b/docs/v2.4/release-notes/telepresence-2.3.0-loglevels.png deleted file mode 100644 index 3d628c54..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.0-loglevels.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.1-alsoProxy.png b/docs/v2.4/release-notes/telepresence-2.3.1-alsoProxy.png deleted file mode 100644 index 4052b927..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.1-alsoProxy.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.1-brew.png b/docs/v2.4/release-notes/telepresence-2.3.1-brew.png deleted file mode 100644 index 2af42490..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.1-brew.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.1-dns.png b/docs/v2.4/release-notes/telepresence-2.3.1-dns.png deleted file mode 100644 index c6335e7a..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.1-dns.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.1-inject.png b/docs/v2.4/release-notes/telepresence-2.3.1-inject.png deleted file mode 100644 index aea1003e..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.1-inject.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.1-large-file-transfer.png b/docs/v2.4/release-notes/telepresence-2.3.1-large-file-transfer.png deleted file mode 100644 index 48ceb381..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.1-large-file-transfer.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.1-trafficmanagerconnect.png b/docs/v2.4/release-notes/telepresence-2.3.1-trafficmanagerconnect.png deleted file mode 100644 index 78128c17..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.1-trafficmanagerconnect.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.2-subnets.png b/docs/v2.4/release-notes/telepresence-2.3.2-subnets.png deleted file mode 100644 index 778c722a..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.2-subnets.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.2-svcport-annotation.png b/docs/v2.4/release-notes/telepresence-2.3.2-svcport-annotation.png deleted file mode 100644 index 1e1e9240..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.2-svcport-annotation.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.3-helm.png 
b/docs/v2.4/release-notes/telepresence-2.3.3-helm.png deleted file mode 100644 index 7b81480a..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.3-helm.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.3-namespace-config.png b/docs/v2.4/release-notes/telepresence-2.3.3-namespace-config.png deleted file mode 100644 index 7864d3a3..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.3-namespace-config.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.3-to-pod.png b/docs/v2.4/release-notes/telepresence-2.3.3-to-pod.png deleted file mode 100644 index aa7be3f6..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.3-to-pod.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.4-improved-error.png b/docs/v2.4/release-notes/telepresence-2.3.4-improved-error.png deleted file mode 100644 index fa8a1298..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.4-improved-error.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.4-ip-error.png b/docs/v2.4/release-notes/telepresence-2.3.4-ip-error.png deleted file mode 100644 index 1d37380c..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.4-ip-error.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.5-agent-config.png b/docs/v2.4/release-notes/telepresence-2.3.5-agent-config.png deleted file mode 100644 index 67d6d3e8..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.5-agent-config.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.5-grpc-max-receive-size.png b/docs/v2.4/release-notes/telepresence-2.3.5-grpc-max-receive-size.png deleted file mode 100644 index 32939f9d..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.5-grpc-max-receive-size.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.5-skipLogin.png b/docs/v2.4/release-notes/telepresence-2.3.5-skipLogin.png deleted file mode 100644 index bf79c191..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.5-skipLogin.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png b/docs/v2.4/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png deleted file mode 100644 index d29a05ad..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.7-keydesc.png b/docs/v2.4/release-notes/telepresence-2.3.7-keydesc.png deleted file mode 100644 index 9bffe5cc..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.7-keydesc.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.3.7-newkey.png b/docs/v2.4/release-notes/telepresence-2.3.7-newkey.png deleted file mode 100644 index c7d47c42..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.3.7-newkey.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.0-cloud-messages.png b/docs/v2.4/release-notes/telepresence-2.4.0-cloud-messages.png deleted file mode 100644 index ffd045ae..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.0-cloud-messages.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.0-windows.png b/docs/v2.4/release-notes/telepresence-2.4.0-windows.png deleted file mode 100644 index d27ba254..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.0-windows.png and 
/dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.1-systema-vars.png b/docs/v2.4/release-notes/telepresence-2.4.1-systema-vars.png deleted file mode 100644 index c098b439..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.1-systema-vars.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.10-actions.png b/docs/v2.4/release-notes/telepresence-2.4.10-actions.png deleted file mode 100644 index 6d849ac2..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.10-actions.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.10-intercept-config.png b/docs/v2.4/release-notes/telepresence-2.4.10-intercept-config.png deleted file mode 100644 index e3f1136a..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.10-intercept-config.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.4-gather-logs.png b/docs/v2.4/release-notes/telepresence-2.4.4-gather-logs.png deleted file mode 100644 index 7db54173..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.4-gather-logs.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.5-logs-anonymize.png b/docs/v2.4/release-notes/telepresence-2.4.5-logs-anonymize.png deleted file mode 100644 index edd01fde..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.5-logs-anonymize.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.5-pod-yaml.png b/docs/v2.4/release-notes/telepresence-2.4.5-pod-yaml.png deleted file mode 100644 index 3f565c4f..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.5-pod-yaml.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.5-preview-url-questions.png b/docs/v2.4/release-notes/telepresence-2.4.5-preview-url-questions.png deleted file mode 100644 index 1823aaa1..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.5-preview-url-questions.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.6-help-text.png b/docs/v2.4/release-notes/telepresence-2.4.6-help-text.png deleted file mode 100644 index aab9178a..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.6-help-text.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.8-health-check.png b/docs/v2.4/release-notes/telepresence-2.4.8-health-check.png deleted file mode 100644 index e10a0b47..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.8-health-check.png and /dev/null differ diff --git a/docs/v2.4/release-notes/telepresence-2.4.8-vpn.png b/docs/v2.4/release-notes/telepresence-2.4.8-vpn.png deleted file mode 100644 index fbb21588..00000000 Binary files a/docs/v2.4/release-notes/telepresence-2.4.8-vpn.png and /dev/null differ diff --git a/docs/v2.4/release-notes/tunnel.jpg b/docs/v2.4/release-notes/tunnel.jpg deleted file mode 100644 index 59a0397e..00000000 Binary files a/docs/v2.4/release-notes/tunnel.jpg and /dev/null differ diff --git a/docs/v2.4/releaseNotes.yml b/docs/v2.4/releaseNotes.yml deleted file mode 100644 index b91a78ec..00000000 --- a/docs/v2.4/releaseNotes.yml +++ /dev/null @@ -1,1085 +0,0 @@ -# This file should be placed in the folder for the version of the -# product that's meant to be documented. A `/release-notes` page will -# be automatically generated and populated at build time. -# -# Note that an entry needs to be added to the `doc-links.yml` file in -# order to surface the release notes in the table of contents. 
-# -# The YAML in this file should contain: -# -# changelog: An (optional) URL to the CHANGELOG for the product. -# items: An array of releases with the following attributes: -# - version: The (optional) version number of the release, if applicable. -# - date: The date of the release in the format YYYY-MM-DD. -# - notes: An array of noteworthy changes included in the release, each having the following attributes: -# - type: The type of change, one of `bugfix`, `feature`, `security` or `change`. -# - title: A short title of the noteworthy change. -# - body: >- -# Two or three sentences describing the change and why it -# is noteworthy. This is HTML, not plain text or -# markdown. It is handy to use YAML's ">-" feature to -# allow line-wrapping. -# - image: >- -# The URL of an image that visually represents the -# noteworthy change. This path is relative to the -# `release-notes` directory; if this file is -# `FOO/releaseNotes.yml`, then the image paths are -# relative to `FOO/release-notes/`. -# - docs: The path to the documentation page where additional information can be found. - -docTitle: Telepresence Release Notes -docDescription: >- - Release notes for Telepresence by Ambassador Labs, a CNCF project - that enables developers to iterate rapidly on Kubernetes - microservices by arming them with infinite-scale development - environments, access to instantaneous feedback loops, and highly - customizable development environments. - -changelog: https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md - -items: - - version: 2.4.11 - date: "2022-02-10" - notes: - - type: change - title: Add additional logging to troubleshoot intermittent issues with intercepts - body: >- - We've noticed some issues with intercepts in v2.4.10, so we are releasing a version - with enhanced logging to help debug and fix the issue. - - version: 2.4.10 - date: "2022-01-13" - notes: - - type: feature - title: Application Protocol Strategy - body: >- - The strategy used when selecting the application protocol for personal intercepts can now be configured using - the intercept.appProtocolStrategy in the config.yml file. - docs: reference/config/#intercept - image: telepresence-2.4.10-intercept-config.png - - type: feature - title: Helm value for the Application Protocol Strategy - body: >- - The strategy when selecting the application protocol for personal intercepts in agents injected by the - mutating webhook can now be configured using the agentInjector.appProtocolStrategy in the Helm chart. - docs: install/helm - - type: feature - title: New --http-plaintext option - body: >- - The flag --http-plaintext can be used to ensure that an intercept uses plaintext http or grpc when - communicating with the workstation process. - docs: reference/intercepts/#tls - - type: feature - title: Configure the default intercept port - body: >- - The port used by default in the telepresence intercept command (8080), can now be changed by setting - the intercept.defaultPort in the config.yml file. - docs: reference/config/#intercept - - type: change - title: Telepresence CI now uses Github Actions - body: >- - Telepresence now uses Github Actions for doing unit and integration testing. It is - now easier for contributors to run tests on PRs since maintainers can add an - "ok to test" label to PRs (including from forks) to run integration tests. 
- docs: https://github.com/telepresenceio/telepresence/actions - image: telepresence-2.4.10-actions.png - - type: bugfix - title: Check conditions before asking questions - body: >- - User will not be asked to log in or add ingress information when creating an intercept until a check has been - made that the intercept is possible. - docs: reference/intercepts/ - - type: bugfix - title: Fix invalid log statement - body: >- - Telepresence will no longer log invalid: "unhandled connection control message: code DIAL_OK" errors. - - type: bugfix - title: Log errors from sshfs/sftp - body: >- - Output to stderr from the traffic-agent's sftp and the client's sshfs processes - are properly logged as errors. - - type: bugfix - title: Don't use Windows path separators in workload pod template - body: >- - Auto installer will no longer not emit backslash separators for the /tel-app-mounts paths in the - traffic-agent container spec when running on Windows. - - version: 2.4.9 - date: "2021-12-09" - notes: - - type: bugfix - title: Helm upgrade nil pointer error - body: >- - A helm upgrade using the --reuse-values flag no longer fails on a "nil pointer" error caused by a nil - telpresenceAPI value. - docs: install/helm#upgrading-the-traffic-manager - - version: 2.4.8 - date: "2021-12-03" - notes: - - type: feature - title: VPN diagnostics tool - body: >- - There is a new subcommand, test-vpn, that can be used to diagnose connectivity issues with a VPN. - See the VPN docs for more information on how to use it. - docs: reference/vpn - image: telepresence-2.4.8-vpn.png - - - type: feature - title: RESTful API service - body: >- - A RESTful service was added to Telepresence, both locally to the client and to the traffic-agent to - help determine if messages with a set of headers should be consumed or not from a message queue where the - intercept headers are added to the messages. - docs: reference/restapi - image: telepresence-2.4.8-health-check.png - - - type: change - title: TELEPRESENCE_LOGIN_CLIENT_ID env variable no longer used - body: >- - You could previously configure this value, but there was no reason to change it, so the value - was removed. - - - type: bugfix - title: Tunneled network connections behave more like ordinary TCP connections. - body: >- - When using Telepresence with an external cloud provider for extensions, those tunneled - connections now behave more like TCP connections, especially when it comes to timeouts. - We've also added increased testing around these types of connections. - - version: 2.4.7 - date: "2021-11-24" - notes: - - type: feature - title: Injector service-name annotation - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-service-name, that can be used to set the name of the service to be intercepted. - This will help disambiguate which service to intercept for when a workload is exposed by multiple services, such as can happen with Argo Rollouts - docs: reference/cluster-config#service-name-annotation - - type: feature - title: Skip the Ingress Dialogue - body: >- - You can now skip the ingress dialogue by setting the ingress parameters in the corresponding flags. - docs: reference/intercepts#skipping-the-ingress-dialogue - - type: feature - title: Never proxy subnets - body: >- - The kubeconfig extensions now support a never-proxy argument, - analogous to also-proxy, that defines a set of subnets that - will never be proxied via telepresence. 
- docs: reference/config#neverproxy - - type: change - title: Daemon versions check - body: >- - Telepresence now checks the versions of the client and the daemons and asks the user to quit and restart if they don't match. - - type: change - title: No explicit DNS flushes - body: >- - Telepresence DNS now uses a very short TTL instead of explicitly flushing DNS by killing the mDNSResponder or doing resolvectl flush-caches - docs: reference/routing#dns-caching - - type: bugfix - title: Legacy flags now work with global flags - body: >- - Legacy flags such as `--swap-deployment` can now be used together with global flags. - - type: bugfix - title: Outbound connection closing - body: >- - Outbound connections are now properly closed when the peer closes. - - type: bugfix - title: Prevent DNS recursion - body: >- - The DNS-resolver will trap recursive resolution attempts (may happen when the cluster runs in a docker-container on the client). - docs: reference/routing#dns-recursion - - type: bugfix - title: Prevent network recursion - body: >- - The TUN-device will trap failed connection attempts that results in recursive calls back into the TUN-device (may happen when the - cluster runs in a docker-container on the client). - docs: reference/routing#connect-recursion - - type: bugfix - title: Traffic Manager deadlock fix - body: >- - The Traffic Manager no longer runs a risk of entering a deadlock when a new Traffic agent arrives. - - type: bugfix - title: webhookRegistry config propagation - body: >- - The configured webhookRegistry is now propagated to the webhook installer even if no webhookAgentImage has been set. - docs: reference/config#images - - type: bugfix - title: Login refreshes expired tokens - body: >- - When a user's token has expired, telepresence login - will prompt the user to log in again to get a new token. Previously, - the user had to telepresence quit and telepresence logout - to get a new token. - docs: https://github.com/telepresenceio/telepresence/issues/2062 - - version: 2.4.6 - date: "2021-11-02" - notes: - - type: feature - title: Manually injecting Traffic Agent - body: >- - Telepresence now supports manually injecting the traffic-agent YAML into workload manifests. - Use the genyaml command to create the sidecar YAML, then add the telepresence.getambassador.io/manually-injected: "true" annotation to your pods to allow Telepresence to intercept them. - docs: reference/intercepts/manual-agent - - - type: feature - title: Telepresence CLI released for Apple silicon - body: >- - Telepresence is now built and released for Apple silicon. - docs: install/?os=macos - - - type: change - title: Telepresence help text now links to telepresence.io - body: >- - We now include a link to our documentation when you run telepresence --help. This will make it easier - for users to find this page whether they acquire Telepresence through Brew or some other mechanism. - image: telepresence-2.4.6-help-text.png - - - type: bugfix - title: Fixed bug when API server is inside CIDR range of pods/services - body: >- - If the API server for your kubernetes cluster had an IP that fell within the - subnet generated from pods/services in a kubernetes cluster, it would proxy traffic - to the API server which would result in hanging or a failed connection. We now ensure - that the API server is explicitly not proxied. 
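As an aside on the 2.4.6 "Manually injecting Traffic Agent" note above: a minimal sketch of where that annotation would go. The annotation key is quoted verbatim from the note; the Deployment name, labels, and image are placeholders, and the sidecar produced by `telepresence genyaml` is assumed to already be in the containers list.

```yaml
# Hypothetical Deployment excerpt (all names are placeholders).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-service
spec:
  selector:
    matchLabels:
      app: example-service
  template:
    metadata:
      labels:
        app: example-service
      annotations:
        # Marks the pod as carrying a manually added traffic-agent so that
        # Telepresence can intercept it without auto-injection.
        telepresence.getambassador.io/manually-injected: "true"
    spec:
      containers:
        - name: example-service
          image: example/service:latest  # placeholder
        # ...plus the traffic-agent container generated by `telepresence genyaml`
```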
- - version: 2.4.5 - date: "2021-10-15" - notes: - - type: feature - title: Get pod yaml with gather-logs command - body: >- - Adding the flag --get-pod-yaml to your request will get the - pod yaml manifest for all kubernetes components you are getting logs for - ( traffic-manager and/or pods containing a - traffic-agent container). This flag is set to false - by default. - docs: reference/client - image: telepresence-2.4.5-pod-yaml.png - - - type: feature - title: Anonymize pod name + namespace when using gather-logs command - body: >- - Adding the flag --anonymize to your command will - anonymize your pod names + namespaces in the output file. We replace the - sensitive names with simple names (e.g. pod-1, namespace-2) to maintain - relationships between the objects without exposing the real names of your - objects. This flag is set to false by default. - docs: reference/client - image: telepresence-2.4.5-logs-anonymize.png - - - type: feature - title: Added context and defaults to ingress questions when creating a preview URL - body: >- - Previously, we referred to OSI model layers when asking these questions, but this - terminology is not commonly used. The questions now provide a clearer context for the user, along with a default answer as an example. - docs: howtos/preview-urls - image: telepresence-2.4.5-preview-url-questions.png - - - type: feature - title: Support for intercepting headless services - body: >- - Intercepting headless services is now officially supported. You can request a - headless service on whatever port it exposes and get a response from the - intercept. This leverages the same approach as intercepting numeric ports when - using the mutating webhook injector, mainly requires the initContainer - to have NET_ADMIN capabilities. - docs: reference/intercepts/#intercepting-headless-services - - - type: change - title: Use one tunnel per connection instead of multiplexing into one tunnel - body: >- - We have changed Telepresence so that it uses one tunnel per connection instead - of multiplexing all connections into one tunnel. This will provide substantial - performance improvements. Clients will still be backwards compatible with older - managers that only support multiplexing. - - - type: bugfix - title: Added checks for Telepresence kubernetes compatibility - body: >- - Telepresence currently works with Kubernetes server versions 1.17.0 - and higher. We have added logs in the connector and traffic-manager - to let users know when they are using Telepresence with a cluster it doesn't support. - docs: reference/cluster-config - - - type: bugfix - title: Traffic Agent security context is now only added when necessary - body: >- - When creating an intercept, Telepresence will now only set the traffic agent's GID - when strictly necessary (i.e. when using headless services or numeric ports). This mitigates - an issue on openshift clusters where the traffic agent can fail to be created due to - openshift's security policies banning arbitrary GIDs. - - - version: 2.4.4 - date: "2021-09-27" - notes: - - type: feature - title: Numeric ports in agent injector - body: >- - The agent injector now supports injecting Traffic Agents into pods that have unnamed ports. 
- docs: reference/cluster-config/#note-on-numeric-ports - - - type: feature - title: New subcommand to gather logs and export into zip file - body: >- - Telepresence has logs for various components (the - traffic-manager, traffic-agents, the root and - user daemons), which are integral for understanding and debugging - Telepresence behavior. We have added the telepresence - gather-logs command to make it simple to compile logs for - all Telepresence components and export them in a zip file that can - be shared to others and/or included in a github issue. For more - information on usage, run telepresence gather-logs --help - . - docs: reference/client - image: telepresence-2.4.4-gather-logs.png - - - type: feature - title: Pod CIDR strategy is configurable in Helm chart - body: >- - Telepresence now enables you to directly configure how to get - pod CIDRs when deploying Telepresence with the Helm chart. - The default behavior remains the same. We've also introduced - the ability to explicitly set what the pod CIDRs should be. - docs: install/helm - - - type: bugfix - title: Compute pod CIDRs more efficiently - body: >- - When computing subnets using the pod CIDRs, the traffic-manager - now uses less CPU cycles. - docs: reference/routing/#subnets - - - type: bugfix - title: Prevent busy loop in traffic-manager - body: >- - In some circumstances, the traffic-manager's CPU - would max out and get pinned at its limit. This required a - shutdown or pod restart to fix. We've added some fixes - to prevent the traffic-manager from getting into this state. - - - type: bugfix - title: Added a fixed buffer size to TUN-device - body: >- - The TUN-device now has a max buffer size of 64K. This prevents the - buffer from growing limitlessly until it receies a PSH, which could - be a blocking operation when receiving lots of TCP-packets. - docs: reference/tun-device - - - type: bugfix - title: Fix hanging user daemon - body: >- - When Telepresence encountered an issue connecting to the cluster or - the root daemon, it could hang indefintely. It now will error correctly - when it encounters that situation. - - - type: bugfix - title: Improved proprietary agent connectivity - body: >- - To determine whether the environment cluster is air-gapped, the - proprietary agent attempts to connect to the cloud during startup. - To deal with a possible initial failure, the agent backs off - and retries the connection with an increasing backoff duration. - - - type: bugfix - title: Telepresence correctly reports intercept port conflict - body: >- - When creating a second intercept targetting the same local port, - it now gives the user an informative error message. Additionally, - it tells them which intercept is currently using that port to make - it easier to remedy. - - - version: 2.4.3 - date: "2021-09-15" - notes: - - type: feature - title: Environment variable TELEPRESENCE_INTERCEPT_ID available in interceptor's environment - body: >- - When you perform an intercept, we now include a TELEPRESENCE_INTERCEPT_ID environment - variable in the environment. - docs: reference/environment/#telepresence-environment-variables - - - type: bugfix - title: Improved daemon stability - body: >- - Fixed a timing bug that sometimes caused a "daemon did not start" failure. - - - type: bugfix - title: Complete logs for Windows - body: >- - Crash stack traces and other errors were incorrectly not written to log files. This has - been fixed so logs for Windows should be at parity with the ones in MacOS and Linux. 
- - - type: bugfix - title: Log rotation fix for Linux kernel 4.11+ - body: >- - On Linux kernel 4.11 and above, the log file rotation now properly reads the - birth-time of the log file. Older kernels continue to use the old behavior - of using the change-time in place of the birth-time. - - - type: bugfix - title: Improved error messaging - body: >- - When Telepresence encounters an error, it tells the user where they should look for - logs related to the error. We have refined this so that it only tells users to look - for errors in the daemon logs for issues that are logged there. - - - type: bugfix - title: Stop resolving localhost - body: >- - When using the overriding DNS resolver, it will no longer apply search paths when - resolving localhost, since that should be resolved on the user's machine - instead of the cluster. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Variable cluster domain - body: >- - Previously, the cluster domain was hardcoded to cluster.local. While this - is true for many kubernetes clusters, it is not for all of them. Now this value is - retrieved from the traffic-manager. - - - type: bugfix - title: Improved cleanup of traffic-agents - body: >- - Telepresence now uninstalls traffic-agents installed via mutating webhook - when using telepresence uninstall --everything. - - - type: bugfix - title: More large file transfer fixes - body: >- - Downloading large files during an intercept will no longer cause timeouts and hanging - traffic-agents. - - - type: bugfix - title: Setting --mount to false when intercepting works as expected - body: >- - When using --mount=false while performing an intercept, the file system - was still mounted. This has been remedied so the intercept behavior respects the - flag. - docs: reference/volume - - - type: bugfix - title: Traffic-manager establishes outbound connections in parallel - body: >- - Previously, the traffic-manager established outbound connections - sequentially. This resulted in slow (and failing) Dial calls would - block all outbound traffic from the workstation (for up to 30 seconds). We now - establish these connections in parallel so that won't occur. - docs: reference/routing/#outbound - - - type: bugfix - title: Status command reports correct DNS settings - body: >- - Telepresence status now correctly reports DNS settings for all operating - systems, instead of Local IP:nil, Remote IP:nil when they don't exist. - - - version: 2.4.2 - date: "2021-09-01" - notes: - - type: feature - title: New subcommand to temporarily change log-level - body: >- - We have added a new telepresence loglevel subcommand that enables users - to temporarily change the log-level for the local demons, the traffic-manager and - the traffic-agents. While the logLevels settings from the config will - still be used by default, this can be helpful if you are currently experiencing an issue and - want to have higher fidelity logs, without doing a telepresence quit and - telepresence connect. You can use telepresence loglevel --help to get - more information on options for the command. - docs: reference/config - - - type: change - title: All components have info as the default log-level - body: >- - We've now set the default for all components of Telepresence (traffic-agent, - traffic-manager, local daemons) to use info as the default log-level. 
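To make the 2.4.2 log-level notes above concrete, here is a sketch of what the persistent `logLevels` settings in `config.yml` might look like (the `telepresence loglevel` subcommand then overrides these temporarily). Only the `logLevels` key is named in the note; the sub-keys and the config file location are assumptions to verify against `reference/config/#log-levels`.

```yaml
# Hypothetical excerpt of the client config.yml
# (e.g. ~/.config/telepresence/config.yml on Linux).
logLevels:
  userDaemon: debug   # written to connector.log
  rootDaemon: info    # written to daemon.log
```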
- - - type: bugfix - title: Updating RBAC in helm chart to fix cluster-id regression - body: >- - In 2.4.1, we enabled the traffic-manager to get the cluster ID by getting the UID - of the default namespace. The helm chart was not updated to give the traffic-manager - those permissions, which has since been fixed. This impacted users who use licensed features of - the Telepresence extension in an air-gapped environment. - docs: reference/cluster-config/#air-gapped-cluster - - - type: bugfix - title: Timeouts for Helm actions are now respected - body: >- - The user-defined timeout for Helm actions wasn't always respected, causing the daemon to hang - indefinitely when failing to install the traffic-manager. - docs: reference/config#timeouts - - - version: 2.4.1 - date: "2021-08-30" - notes: - - type: feature - title: External cloud variables are now configurable - body: >- - We now support configuring the host and port for the cloud in your config.yml. These - are used when logging in to utilize features provided by an extension, and are also passed - along as environment variables when installing the `traffic-manager`. Additionally, we - now run our testsuite with these variables set to localhost to continue to ensure Telepresence - is fully fuctional without depeneding on an external service. The SYSTEMA_HOST and SYSTEMA_PORT - environment variables are no longer used. - image: telepresence-2.4.1-systema-vars.png - docs: reference/config/#cloud - - - type: feature - title: Helm chart can now regenerate certificate used for mutating webhook on-demand. - body: >- - You can now set agentInjector.certificate.regenerate when deploying Telepresence - with the Helm chart to automatically regenerate the certificate used by the agent injector webhook. - docs: install/helm - - - type: change - title: Traffic Manager installed via helm - body: >- - The traffic-manager is now installed via an embedded version of the Helm chart when telepresence connect is first performed on a cluster. - This change is transparent to the user. - A new configuration flag, timeouts.helm sets the timeouts for all helm operations performed by the Telepresence binary. - docs: reference/config#timeouts - - - type: change - title: traffic-manager gets cluster ID itself instead of via environment variable - body: >- - The traffic-manager used to get the cluster ID as an environment variable when running - telepresence connnect or via adding the value in the helm chart. This was - clunky so now the traffic-manager gets the value itself as long as it has permissions - to "get" and "list" namespaces (this has been updated in the helm chart). - docs: install/helm - - - type: bugfix - title: Telepresence now mounts all directories from /var/run/secrets - body: >- - In the past, we only mounted secret directories in /var/run/secrets/kubernetes.io. - We now mount *all* directories in /var/run/secrets, which, for example, includes - directories like eks.amazonaws.com used for IRSA tokens. - docs: reference/volume - - - type: bugfix - title: Max gRPC receive size correctly propagates to all grpc servers - body: >- - This fixes a bug where the max gRPC receive size was only propagated to some of the - grpc servers, causing failures when the message size was over the default. - docs: reference/config/#grpc - - - type: bugfix - title: Updated our Homebrew packaging to run manually - body: >- - We made some updates to our script that packages Telepresence for Homebrew so that it - can be run manually. 
This will enable maintainers of Telepresence to run the script manually - should we ever need to rollback a release and have latest point to an older verison. - docs: install/ - - - type: bugfix - title: Telepresence uses namespace from kubeconfig context on each call - body: >- - In the past, Telepresence would use whatever namespace was specified in the kubeconfig's current-context - for the entirety of the time a user was connected to Telepresence. This would lead to confusing behavior - when a user changed the context in their kubeconfig and expected Telepresence to acknowledge that change. - Telepresence now will do that and use the namespace designated by the context on each call. - - - type: bugfix - title: Idle outbound TCP connections timeout increased to 7200 seconds - body: >- - Some users were noticing that their intercepts would start failing after 60 seconds. - This was because the keep idle outbound TCP connections were set to 60 seconds, which we have - now bumped to 7200 seconds to match Linux's tcp_keepalive_time default. - - - type: bugfix - title: Telepresence will automatically remove a socket upon ungraceful termination - body: >- - When a Telepresence process terminates ungracefully, it would inform users that "this usually means - that the process has terminated ungracefully" and implied that they should remove the socket. We've - now made it so Telepresence will automatically attempt to remove the socket upon ungraceful termination. - - - type: bugfix - title: Fixed user daemon deadlock - body: >- - Remedied a situation where the user daemon could hang when a user was logged in. - - - type: bugfix - title: Fixed agentImage config setting - body: >- - The config setting images.agentImages is no longer required to contain the repository, and it - will use the value at images.repository. - docs: reference/config/#images - - - version: 2.4.0 - date: "2021-08-04" - notes: - - type: feature - title: Windows Client Developer Preview - body: >- - There is now a native Windows client for Telepresence that is being released as a Developer Preview. - All the same features supported by the MacOS and Linux client are available on Windows. - image: telepresence-2.4.0-windows.png - docs: install - - - type: feature - title: CLI raises helpful messages from Ambassador Cloud - body: >- - Telepresence can now receive messages from Ambassador Cloud and raise - them to the user when they perform certain commands. This enables us - to send you messages that may enhance your Telepresence experience when - using certain commands. Frequency of messages can be configured in your - config.yml. - image: telepresence-2.4.0-cloud-messages.png - docs: reference/config#cloud - - - type: bugfix - title: Improved stability of systemd-resolved-based DNS - body: >- - When initializing the systemd-resolved-based DNS, the routing domain - is set to improve stability in non-standard configurations. This also enables the - overriding resolver to do a proper take over once the DNS service ends. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Fixed an edge case when intercepting a container with multiple ports - body: >- - When specifying a port of a container to intercept, if there was a container in the - pod without ports, it was automatically selected. This has been fixed so we'll only - choose the container with "no ports" if there's no container that explicitly matches - the port used in your intercept. 
- docs: reference/intercepts/#creating-an-intercept-when-a-service-has-multiple-ports - - - type: bugfix - title: $(NAME) references in agent's environments are now interpolated correctly. - body: >- - If you had an environment variable $(NAME) in your workload that referenced another, intercepts - would not correctly interpolate $(NAME). This has been fixed and works automatically. - - - type: bugfix - title: Telepresence no longer prints INFO message when there is no config.yml - body: >- - Fixed a regression that printed an INFO message to the terminal when there wasn't a - config.yml present. The config is optional, so this message has been - removed. - docs: reference/config - - - type: bugfix - title: Telepresence no longer panics when using --http-match - body: >- - Fixed a bug where Telepresence would panic if the value passed to --http-match - didn't contain an equal sign, which has been fixed. The correct syntax is in the --help - string and looks like --http-match=HTTP2_HEADER=REGEX - docs: reference/intercepts/#intercept-behavior-when-logged-in-to-ambassador-cloud - - - type: bugfix - title: Improved subnet updates - body: >- - The `traffic-manager` used to update subnets whenever the `Nodes` or `Pods` changed, even if - the underlying subnet hadn't changed, which created a lot of unnecessary traffic between the - client and the `traffic-manager`. This has been fixed so we only send updates when the subnets - themselves actually change. - docs: reference/routing/#subnets - - - version: 2.3.7 - date: "2021-07-23" - notes: - - type: feature - title: Also-proxy in telepresence status - body: >- - An also-proxy entry in the Kubernetes cluster config will - show up in the output of the telepresence status command. - docs: reference/config - - - type: feature - title: Non-interactive telepresence login - body: >- - telepresence login now has an - --apikey=KEY flag that allows for - non-interactive logins. This is useful for headless - environments where launching a web-browser is impossible, - such as cloud shells, Docker containers, or CI. - image: telepresence-2.3.7-newkey.png - docs: reference/client/login/ - - - type: bugfix - title: Mutating webhook injector correctly hides named ports for probes. - body: >- - The mutating webhook injector has been fixed to correctly rename named ports for liveness and readiness probes - docs: reference/cluster-config - - - type: bugfix - title: telepresence current-cluster-id crash fixed - body: >- - Fixed a regression introduced in 2.3.5 that caused `telepresence current-cluster-id` - to crash. - docs: reference/cluster-config - - - type: bugfix - title: Better UX around intercepts with no local process running - body: >- - Requests would hang indefinitely when initiating an intercept before you - had a local process running. This has been fixed and will result in an - Empty reply from server until you start a local process. - docs: reference/intercepts - - - type: bugfix - title: API keys no longer show as "no description" - body: >- - New API keys generated internally for communication with - Ambassador Cloud no longer show up as "no description" in - the Ambassador Cloud web UI. Existing API keys generated by - older versions of Telepresence will still show up this way. 
- image: telepresence-2.3.7-keydesc.png - - - type: bugfix - title: Fix corruption of user-info.json - body: >- - Fixed a race condition that logging in and logging out - rapidly could cause memory corruption or corruption of the - user-info.json cache file used when - authenticating with Ambassador Cloud. - - - type: bugfix - title: Improved DNS resolver for systemd-resolved - body: - Telepresence's systemd-resolved-based DNS resolver is now more - stable and in case it fails to initialize, the overriding resolver - will no longer cause general DNS lookup failures when telepresence defaults to - using it. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Faster telepresence list command - body: - The performance of telepresence list has been increased - significantly by reducing the number of calls the command makes to the cluster. - docs: reference/client - - - version: 2.3.6 - date: "2021-07-20" - notes: - - type: bugfix - title: Fix preview URLs - body: >- - Fixed a regression introduced in 2.3.5 that caused preview - URLs to not work. - - - type: bugfix - title: Fix subnet discovery - body: >- - Fixed a regression introduced in 2.3.5 where the Traffic - Manager's RoleBinding did not correctly appoint - the traffic-manager Role, causing - subnet discovery to not be able to work correctly. - docs: reference/rbac/ - - - type: bugfix - title: Fix root-user configuration loading - body: >- - Fixed a regression introduced in 2.3.5 where the root daemon - did not correctly read the configuration file; ignoring the - user's configured log levels and timeouts. - docs: reference/config/ - - - type: bugfix - title: Fix a user daemon crash - body: >- - Fixed an issue that could cause the user daemon to crash - during shutdown, as during shutdown it unconditionally - attempted to close a channel even though the channel might - already be closed. - - - version: 2.3.5 - date: "2021-07-15" - notes: - - type: feature - title: traffic-manager in multiple namespaces - body: >- - We now support installing multiple traffic managers in the same cluster. - This will allow operators to install deployments of telepresence that are - limited to certain namespaces. - image: ./telepresence-2.3.5-traffic-manager-namespaces.png - docs: install/helm - - type: feature - title: No more dependence on kubectl - body: >- - Telepresence no longer depends on having an external - kubectl binary, which might not be present for - OpenShift users (who have oc instead of - kubectl). - - type: feature - title: Agent image now configurable - body: >- - We now support configuring which agent image + registry to use in the - config. This enables users whose laptop is an air-gapped environment to - create personal intercepts without requiring a login. It also makes it easier - for those who are developing on Telepresence to specify which agent image should - be used. Env vars TELEPRESENCE_AGENT_IMAGE and TELEPRESENCE_REGISTRY are no longer - used. - image: ./telepresence-2.3.5-agent-config.png - docs: reference/config/#images - - type: feature - title: Max gRPC receive size now configurable - body: >- - The default max size of messages received through gRPC (4 MB) is sometimes insufficient. It can now be configured. 
- image: ./telepresence-2.3.5-grpc-max-receive-size.png - docs: reference/config/#grpc - - type: feature - title: CLI can be used in air-gapped environments - body: >- - While Telepresence will auto-detect if your cluster is in an air-gapped environment, - we've added an option users can add to their config.yml to ensure the cli acts like it - is in an air-gapped environment. Air-gapped environments require a manually installed - licence. - docs: reference/cluster-config/#air-gapped-cluster - image: ./telepresence-2.3.5-skipLogin.png - - version: 2.3.4 - date: "2021-07-09" - notes: - - type: bugfix - title: Improved IP log statements - body: >- - Some log statements were printing incorrect characters, when they should have been IP addresses. - This has been resolved to include more accurate and useful logging. - docs: reference/config/#log-levels - image: ./telepresence-2.3.4-ip-error.png - - type: bugfix - title: Improved messaging when multiple services match a workload - body: >- - If multiple services matched a workload when performing an intercept, Telepresence would crash. - It now gives the correct error message, instructing the user on how to specify which - service the intercept should use. - image: ./telepresence-2.3.4-improved-error.png - docs: reference/intercepts - - type: bugfix - title: Traffic-manger creates services in its own namespace to determine subnet - body: >- - Telepresence will now determine the service subnet by creating a dummy-service in its own - namespace, instead of the default namespace, which was causing RBAC permissions issues in - some clusters. - docs: reference/routing/#subnets - - type: bugfix - title: Telepresence connect respects pre-existing clusterrole - body: >- - When Telepresence connects, if the traffic-manager's desired clusterrole already exists in the - cluster, Telepresence will no longer try to update the clusterrole. - docs: reference/rbac - - type: bugfix - title: Helm Chart fixed for clientRbac.namespaced - body: >- - The Telepresence Helm chart no longer fails when installing with --set clientRbac.namespaced=true. - docs: install/helm - - version: 2.3.3 - date: "2021-07-07" - notes: - - type: feature - title: Traffic Manager Helm Chart - body: >- - Telepresence now supports installing the Traffic Manager via Helm. - This will make it easy for operators to install and configure the - server-side components of Telepresence separately from the CLI (which - in turn allows for better separation of permissions). - image: ./telepresence-2.3.3-helm.png - docs: install/helm/ - - type: feature - title: Traffic-manager in custom namespace - body: >- - As the traffic-manager can now be installed in any - namespace via Helm, Telepresence can now be configured to look for the - Traffic Manager in a namespace other than ambassador. - This can be configured on a per-cluster basis. - image: ./telepresence-2.3.3-namespace-config.png - docs: reference/config - - type: feature - title: Intercept --to-pod - body: >- - telepresence intercept now supports a - --to-pod flag that can be used to port-forward sidecars' - ports from an intercepted pod. - image: ./telepresence-2.3.3-to-pod.png - docs: reference/intercepts - - type: change - title: Change in migration from edgectl - body: >- - Telepresence no longer automatically shuts down the old - api_version=1 edgectl daemon. If migrating - from such an old version of edgectl you must now manually - shut down the edgectl daemon before running Telepresence. 
- This was already the case when migrating from the newer - api_version=2 edgectl. - - type: bugfix - title: Fixed error during shutdown - body: >- - The root daemon no longer terminates when the user daemon disconnects - from its gRPC streams, and instead waits to be terminated by the CLI. - This could cause problems with things not being cleaned up correctly. - - type: bugfix - title: Intercepts will survive deletion of intercepted pod - body: >- - An intercept will survive deletion of the intercepted pod provided - that another pod is created (or already exists) that can take over. - - version: 2.3.2 - date: "2021-06-18" - notes: - # Headliners - - type: feature - title: Service Port Annotation - body: >- - The mutator webhook for injecting traffic-agents now - recognizes a - telepresence.getambassador.io/inject-service-port - annotation to specify which port to intercept; bringing the - functionality of the --port flag to users who - use the mutator webook in order to control Telepresence via - GitOps. - image: ./telepresence-2.3.2-svcport-annotation.png - docs: reference/cluster-config#service-port-annotation - - type: feature - title: Outbound Connections - body: >- - Outbound connections are now routed through the intercepted - Pods which means that the connections originate from that - Pod from the cluster's perspective. This allows service - meshes to correctly identify the traffic. - docs: reference/routing/#outbound - - type: change - title: Inbound Connections - body: >- - Inbound connections from an intercepted agent are now - tunneled to the manager over the existing gRPC connection, - instead of establishing a new connection to the manager for - each inbound connection. This avoids interference from - certain service mesh configurations. - docs: reference/routing/#inbound - - # RBAC changes - - type: change - title: Traffic Manager needs new RBAC permissions - body: >- - The Traffic Manager requires RBAC - permissions to list Nodes, Pods, and to create a dummy - Service in the manager's namespace. - docs: reference/routing/#subnets - - type: change - title: Reduced developer RBAC requirements - body: >- - The on-laptop client no longer requires RBAC permissions to list the Nodes - in the cluster or to create Services, as that functionality - has been moved to the Traffic Manager. - - # Bugfixes - - type: bugfix - title: Able to detect subnets - body: >- - Telepresence will now detect the Pod CIDR ranges even if - they are not listed in the Nodes. - image: ./telepresence-2.3.2-subnets.png - docs: reference/routing/#subnets - - type: bugfix - title: Dynamic IP ranges - body: >- - The list of cluster subnets that the virtual network - interface will route is now configured dynamically and will - follow changes in the cluster. - - type: bugfix - title: No duplicate subnets - body: >- - Subnets fully covered by other subnets are now pruned - internally and thus never superfluously added to the - laptop's routing table. - docs: reference/routing/#subnets - - type: change # not a bugfix, but it only makes sense to mention after the above bugfixes - title: Change in default timeout - body: >- - The trafficManagerAPI timeout default has - changed from 5 seconds to 15 seconds, in order to facilitate - the extended time it takes for the traffic-manager to do its - initial discovery of cluster info as a result of the above - bugfixes. 
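A sketch of how the timeouts discussed in these notes might be overridden in `config.yml`. The `trafficManagerAPI` key and its 15-second default come from the note directly above; `helm` and `trafficManagerConnect` are the keys mentioned in the 2.4.1 note above and the 2.3.1 note below. The exact spelling and accepted units should be checked against `reference/config/#timeouts`, and the `helm` value here is just an illustrative number.

```yaml
# Hypothetical config.yml excerpt overriding timeout defaults.
timeouts:
  trafficManagerAPI: 15s      # default raised from 5s to 15s in 2.3.2
  trafficManagerConnect: 60s  # default raised from 20s to 60s in 2.3.1
  helm: 30s                   # applies to Helm operations performed by the CLI (added in 2.4.1)
```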
- - type: bugfix - title: Removal of DNS config files on macOS - body: >- - On macOS, files generated under - /etc/resolver/ as the result of using - include-suffixes in the cluster config are now - properly removed on quit. - docs: reference/routing/#macos-resolver - - - type: bugfix - title: Large file transfers - body: >- - Telepresence no longer erroneously terminates connections - early when sending a large HTTP response from an intercepted - service. - - type: bugfix - title: Race condition in shutdown - body: >- - When shutting down the user-daemon or root-daemon on the - laptop, telepresence quit and related commands - no longer return early before everything is fully shut down. - Now it can be counted on that by the time the command has - returned that all of the side-effects on the laptop have - been cleaned up. - - version: 2.3.1 - date: "2021-06-14" - notes: - - title: DNS Resolver Configuration - body: "Telepresence now supports per-cluster configuration for custom dns behavior, which will enable users to determine which local + remote resolver to use and which suffixes should be ignored + included. These can be configured on a per-cluster basis." - image: ./telepresence-2.3.1-dns.png - docs: reference/config - type: feature - - title: AlsoProxy Configuration - body: "Telepresence now supports also proxying user-specified subnets so that they can access external services only accessible to the cluster while connected to Telepresence. These can be configured on a per-cluster basis and each subnet is added to the TUN device so that requests are routed to the cluster for IPs that fall within that subnet." - image: ./telepresence-2.3.1-alsoProxy.png - docs: reference/config - type: feature - - title: Mutating Webhook for Injecting Traffic Agents - body: "The Traffic Manager now contains a mutating webhook to automatically add an agent to pods that have the telepresence.getambassador.io/traffic-agent: enabled annotation. This enables Telepresence to work well with GitOps CD platforms that rely on higher level kubernetes objects matching what is stored in git. For workloads without the annotation, Telepresence will add the agent the way it has in the past" - image: ./telepresence-2.3.1-inject.png - docs: reference/rbac - type: feature - - title: Traffic Manager Connect Timeout - body: "The trafficManagerConnect timeout default has changed from 20 seconds to 60 seconds, in order to facilitate the extended time it takes to apply everything needed for the mutator webhook." - image: ./telepresence-2.3.1-trafficmanagerconnect.png - docs: reference/config - type: change - - title: Fix for large file transfers - body: "Fix a tun-device bug where sometimes large transfers from services on the cluster would hang indefinitely" - image: ./telepresence-2.3.1-large-file-transfer.png - docs: reference/tun-device - type: bugfix - - title: Brew Formula Changed - body: "Now that the Telepresence rewrite is the main version of Telepresence, you can install it via Brew like so: brew install datawire/blackbird/telepresence." - image: ./telepresence-2.3.1-brew.png - docs: install/ - type: change - - version: 2.3.0 - date: "2021-06-01" - notes: - - title: Brew install Telepresence - body: "Telepresence can now be installed via brew on macOS, which makes it easier for users to stay up-to-date with the latest telepresence version. To install via brew, you can use the following command: brew install datawire/blackbird/telepresence2." 
- image: ./telepresence-2.3.0-homebrew.png - docs: install/ - type: feature - - title: TCP and UDP routing via Virtual Network Interface - body: "Telepresence will now perform routing of outbound TCP and UDP traffic via a Virtual Network Interface (VIF). The VIF is a layer 3 TUN-device that exists while Telepresence is connected. It makes the subnets in the cluster available to the workstation and will also route DNS requests to the cluster and forward them to intercepted pods. This means that pods with custom DNS configuration will work as expected. Prior versions of Telepresence would use firewall rules and were only capable of routing TCP." - image: ./tunnel.jpg - docs: reference/tun-device - type: feature - - title: SSH is no longer used - body: "All traffic between the client and the cluster is now tunneled via the traffic manager gRPC API. This means that Telepresence no longer uses ssh tunnels and that the manager no longer have an sshd installed. Volume mounts are still established using sshfs but it is now configured to communicate using the sftp-protocol directly, which means that the traffic agent also runs without sshd. A desired side effect of this is that the manager and agent containers no longer need a special user configuration." - image: ./no-ssh.png - docs: reference/tun-device/#no-ssh-required - type: change - - title: Running in a Docker container - body: "Telepresence can now be run inside a Docker container. This can be useful for avoiding side effects on a workstation's network, establishing multiple sessions with the traffic manager, or working with different clusters simultaneously." - image: ./run-tp-in-docker.png - docs: reference/inside-container - type: feature - - title: Configurable Log Levels - body: "Telepresence now supports configuring the log level for Root Daemon and User Daemon logs. This provides control over the nature and volume of information that Telepresence generates in daemon.log and connector.log." - image: ./telepresence-2.3.0-loglevels.png - docs: reference/config/#log-levels - type: feature - - version: 2.2.2 - date: "2021-05-17" - notes: - - title: Legacy Telepresence subcommands - body: Telepresence is now able to translate common legacy Telepresence commands into native Telepresence commands. So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the new Telepresence binary. - image: ./telepresence-2.2.png - docs: install/migrate-from-legacy/ - type: feature diff --git a/docs/v2.4/troubleshooting/index.md b/docs/v2.4/troubleshooting/index.md deleted file mode 100644 index 21ff5405..00000000 --- a/docs/v2.4/troubleshooting/index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an intercept did not generate a preview URL - -Preview URLs can only be created if Telepresence is [logged in to -Ambassador Cloud](../reference/client/login/). When not logged in, it -will not even try to create a preview URL (additionally, by default it -will intercept all traffic rather than just a subset of the traffic). -Remove the intercept with `telepresence leave [deployment name]`, run -`telepresence login` to login to Ambassador Cloud, then recreate the -intercept. See the [intercepts how-to doc](../howtos/intercepts) for -more details. 
- -## Error on accessing preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on accessing preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Connecting to a cluster via VPN doesn't work. - -There are a few different issues that could arise when working with a VPN. Please see the [dedicated page](../reference/vpn) on Telepresence and VPNs to learn more on how to fix these. - -## Your GitHub organization isn't listed - -Ambassador Cloud needs access granted to your GitHub organization as a -third-party OAuth app. If an organization isn't listed during login -then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → -**Settings** → **Applications** → **Authorized OAuth Apps** → -**Ambassador Labs**. An organization owner will have a **Grant** -button, anyone not an owner will have **Request** which sends an email -to the owner. If an access request has been denied in the past the -user will not see the **Request** button, they will have to reach out -to the owner. - -Once access is granted, log out of Ambassador Cloud and log back in; -you should see the GitHub organization listed. - -The organization owner can go to the **GitHub menu** → **Your -organizations** → **[org name]** → **Settings** → **Third-party -access** to see if Ambassador Labs has access already or authorize a -request for access (only owners will see **Settings** on the -organization page). Clicking the pencil icon will show the -permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or requesting access on initial login - -When using GitHub as your identity provider, the first time you log in -to Ambassador Cloud GitHub will ask to authorize Ambassador Labs to -access your organizations and certain user data. - - - -Any listed organization with a green check has already granted access -to Ambassador Labs (you still need to authorize to allow Ambassador -Labs to read your user data and organization membership). - -Any organization with a red "X" requires access to be granted to -Ambassador Labs. Owners of the organization will see a **Grant** -button. Anyone who is not an owner will see a **Request** button. -This will send an email to the organization owner requesting approval -to access the organization. 
If an access request has been denied in -the past the user will not see the **Request** button, they will have -to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud -then back in to select the organization. - -### Volume mounts are not working on macOS - -It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. Lately there's been some issues using `brew install sshfs` a macOS workstation because the required component `osxfuse` (now named `macfuse`) isn't open source and hence, no longer supported. As a workaround, you can now use `gromgit/fuse/sshfs-mac` instead. Follow these steps: - -1. Remove old sshfs, macfuse, osxfuse using `brew uninstall` -2. `brew install --cask macfuse` -3. `brew install gromgit/fuse/sshfs-mac` -4. `brew link --overwrite sshfs-mac` - -Now sshfs -V shows you the correct version, e.g.: -``` -$ sshfs -V -SSHFS version 2.10 -FUSE library version: 2.9.9 -fuse: no mount point -``` - -but one more thing must be done before it works OK: -5. Try a mount (or an intercept that performs a mount). It will fail because you need to give permission to “Benjamin Fleischer” to execute a kernel extension (a pop-up appears that takes you to the system preferences). -6. Approve the needed permission -7. Reboot your computer. diff --git a/docs/v2.4/tutorial.md b/docs/v2.4/tutorial.md deleted file mode 100644 index 9cc7745c..00000000 --- a/docs/v2.4/tutorial.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for -your OS to install it and log in to Ambassador Cloud in your browser. -Follow the prompts to log in with GitHub then select your -organization. You will be redirected to the Ambassador Cloud -dashboard; later you will manage your preview URLs here. - -### macOS - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/2.4.11/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/2.4.11/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. 
Login with the CLI: -telepresence login -``` - -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -### Linux - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/2.4.11/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. - - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip>/. Be sure to use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
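If the external load balancer takes a while to provision, the address from step 4 may come back empty at first. Below is a minimal sketch that simply re-runs the same `kubectl get service` command from step 4 until an IP is returned; the five-second interval is an arbitrary choice, not something from the docs.

```shell
# Poll the Edge Stack load balancer until an external IP is assigned,
# then print the URL to open in the browser.
while true; do
  LB_IP=$(kubectl get service -n ambassador ambassador \
    -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  if [ -n "$LB_IP" ]; then
    echo "Open http://$LB_IP/ in your browser"
    break
  fi
  echo "Waiting for the load balancer..."
  sleep 5
done
```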
- -## Developing with Telepresence - -Now that your app is all wired up you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change on the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but more complex apps requiring many more dependencies is not feasible to manage running on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services on code running locally without needing to run dependent services or redeploy code updates to your cluster on every change. It also will generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and - logging in to Ambassador Cloud. The Cloud dashboard is used to - manage your intercepts and share them with colleagues. You must be - logged in to create personal intercepts as we are going to do here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`. 
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom of the page informing you that you are viewing a preview URL, along with your name and org name.
- - - - - - -
  1. Switch back to the dashboard page in your browser and refresh it to see your preview URL listed. Click the box to expand the options, where you can disable authentication or remove the preview.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/v2.4/versions.yml b/docs/v2.4/versions.yml deleted file mode 100644 index 3781c2ae..00000000 --- a/docs/v2.4/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.4.10" -dlVersion: "latest" -docsVersion: "2.4" -branch: release/v2 -productName: "Telepresence" diff --git a/docs/v2.5/.DS_Store b/docs/v2.5/.DS_Store deleted file mode 100644 index 5063f1c8..00000000 Binary files a/docs/v2.5/.DS_Store and /dev/null differ diff --git a/docs/v2.5/community.md b/docs/v2.5/community.md deleted file mode 100644 index 922457c9..00000000 --- a/docs/v2.5/community.md +++ /dev/null @@ -1,12 +0,0 @@ -# Community - -## Contributor's guide -Please review our [contributor's guide](https://github.com/telepresenceio/telepresence/blob/release/v2/DEVELOPING.md) -on GitHub to learn how you can help make Telepresence better. - -## Changelog -Our [changelog](https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md) -describes new features, bug fixes, and updates to each version of Telepresence. - -## Meetings -Check out our community [meeting schedule](https://github.com/telepresenceio/telepresence/blob/release/v2/MEETING_SCHEDULE.md) for opportunities to interact with Telepresence developers. diff --git a/docs/v2.5/concepts/context-prop.md b/docs/v2.5/concepts/context-prop.md deleted file mode 100644 index b3eb41e3..00000000 --- a/docs/v2.5/concepts/context-prop.md +++ /dev/null @@ -1,37 +0,0 @@ -# Context propagation - -**Context propagation** is the transfer of request metadata across the services and remote processes of a distributed system. Telepresence uses context propagation to intelligently route requests to the appropriate destination. - -This metadata is the context that is transferred across system services. It commonly takes the form of HTTP headers; context propagation is usually referred to as header propagation. A component of the system (like a proxy or performance monitoring tool) injects the headers into requests as it relays them. - -Metadata propagation refers to any service or other middleware not stripping away the headers. 
Propagation facilitates the movement of the injected contexts between other downstream services and processes. - - -## What is distributed tracing? - -Distributed tracing is a technique for troubleshooting and profiling distributed microservices applications and is a common application for context propagation. It is becoming a key component for debugging. - -In a microservices architecture, a single request may trigger additional requests to other services. The originating service may not cause the failure or slow request directly; a downstream dependent service may instead be to blame. - -An application like Datadog or New Relic will use agents running on services throughout the system to inject traffic with HTTP headers (the context). They will track the request’s entire path from origin to destination to reply, gathering data on routes the requests follow and performance. The injected headers follow the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) (or another header format, such as [B3 headers](https://github.com/openzipkin/b3-propagation)), which facilitates maintaining the headers through every service without being stripped (the propagation). - - -## What are intercepts and preview URLs? - -[Intercepts](../../reference/intercepts) and [preview -URLs](../../howtos/preview-urls/) are functions of Telepresence that -enable easy local development from a remote Kubernetes cluster and -offer a preview environment for sharing and real-time collaboration. - -Telepresence uses custom HTTP headers and header propagation to -identify which traffic to intercept both for plain personal intercepts -and for personal intercepts with preview URLs; these techniques are -more commonly used for distributed tracing, so what they are being -used for is a little unorthodox, but the mechanisms for their use are -already widely deployed because of the prevalence of tracing. The -headers facilitate the smart routing of requests either to live -services in the cluster or services running locally on a developer’s -machine. The intercepted traffic can be further limited by using path -based routing. - -Preview URLs, when created, generate an ingress request containing a custom header with a token (the context). Telepresence sends this token to [Ambassador Cloud](https://app.getambassador.io) with other information about the preview. Visiting the preview URL directs the user to Ambassador Cloud, which proxies the user to the cluster ingress with the token header injected into the request. The request carrying the header is routed in the cluster to the appropriate pod (the propagation). The Traffic Agent on the service pod sees the header and intercepts the request, redirecting it to the local developer machine that ran the intercept. diff --git a/docs/v2.5/concepts/devloop.md b/docs/v2.5/concepts/devloop.md deleted file mode 100644 index 8b1fbf35..00000000 --- a/docs/v2.5/concepts/devloop.md +++ /dev/null @@ -1,50 +0,0 @@ -# The developer experience and the inner dev loop - -## How is the developer experience changing? - -The developer experience is the workflow a developer uses to develop, test, deploy, and release software. - -Typically this experience has consisted of both an inner dev loop and an outer dev loop. The inner dev loop is where the individual developer codes and tests, and once the developer pushes their code to version control, the outer dev loop is triggered. - -The outer dev loop is _everything else_ that happens leading up to release. 
This includes code merge, automated code review, test execution, deployment, [controlled (canary) release](https://www.getambassador.io/docs/argo/latest/concepts/canary/), and observation of results. The modern outer dev loop might include, for example, an automated CI/CD pipeline as part of a [GitOps workflow](https://www.getambassador.io/docs/argo/latest/concepts/gitops/#what-is-gitops) and a progressive delivery strategy relying on automated canaries, i.e. to make the outer loop as fast, efficient and automated as possible. - -Cloud-native technologies have fundamentally altered the developer experience in two ways: one, developers now have to take extra steps in the inner dev loop; two, developers need to be concerned with the outer dev loop as part of their workflow, even if most of their time is spent in the inner dev loop. - -Engineers now must design and build distributed service-based applications _and_ also assume responsibility for the full development life cycle. The new developer experience means that developers can no longer rely on monolithic application developer best practices, such as checking out the entire codebase and coding locally with a rapid “live-reload” inner development loop. Now developers have to manage external dependencies, build containers, and implement orchestration configuration (e.g. Kubernetes YAML). This may appear trivial at first glance, but this adds development time to the equation. - -## What is the inner dev loop? - -The inner dev loop is the single developer workflow. A single developer should be able to set up and use an inner dev loop to code and test changes quickly. - -Even within the Kubernetes space, developers will find much of the inner dev loop familiar. That is, code can still be written locally at a level that a developer controls and committed to version control. - -In a traditional inner dev loop, if a typical developer codes for 360 minutes (6 hours) a day, with a traditional local iterative development loop of 5 minutes — 3 coding, 1 building, i.e. compiling/deploying/reloading, 1 testing inspecting, and 10-20 seconds for committing code — they can expect to make ~70 iterations of their code per day. Any one of these iterations could be a release candidate. The only “developer tax” being paid here is for the commit process, which is negligible. - -![traditional inner dev loop](../../images/trad-inner-dev-loop.png) - -## In search of lost time: How does containerization change the inner dev loop? - -The inner dev loop is where writing and testing code happens, and time is critical for maximum developer productivity and getting features in front of end users. The faster the feedback loop, the faster developers can refactor and test again. - -Changes to the inner dev loop process, i.e., containerization, threaten to slow this development workflow down. Coding stays the same in the new inner dev loop, but code has to be containerized. The _containerized_ inner dev loop requires a number of new steps: - -* packaging code in containers -* writing a manifest to specify how Kubernetes should run the application (e.g., YAML-based configuration information, such as how much memory should be given to a container) -* pushing the container to the registry -* deploying containers in Kubernetes - -Each new step within the container inner dev loop adds to overall development time, and developers are repeating this process frequently. 
If the build time is incremented to 5 minutes — not atypical with a standard container build, registry upload, and deploy — then the number of possible development iterations per day drops to ~40. At the extreme that’s a 40% decrease in potential new features being released. This new container build step is a hidden tax, which is quite expensive. - - -![container inner dev loop](../../images/container-inner-dev-loop.png) - -## Tackling the slow inner dev loop - -A slow inner dev loop can negatively impact frontend and backend teams, delaying work on individual and team levels and slowing releases into production overall. - -For example: - -* Frontend developers have to wait for previews of backend changes on a shared dev/staging environment (for example, until CI/CD deploys a new version) and/or rely on mocks/stubs/virtual services when coding their application locally. These changes are only verifiable by going through the CI/CD process to build and deploy within a target environment. -* Backend developers have to wait for CI/CD to build and deploy their app to a target environment to verify that their code works correctly with cluster or cloud-based dependencies as well as to share their work to get feedback. - -New technologies and tools can facilitate cloud-native, containerized development. And in the case of a sluggish inner dev loop, developers can accelerate productivity with tools that help speed the loop up again. diff --git a/docs/v2.5/concepts/devworkflow.md b/docs/v2.5/concepts/devworkflow.md deleted file mode 100644 index fa24fc2b..00000000 --- a/docs/v2.5/concepts/devworkflow.md +++ /dev/null @@ -1,7 +0,0 @@ -# The changing development workflow - -A changing workflow is one of the main challenges for developers adopting Kubernetes. Software development itself isn’t the challenge. Developers can continue to [code using the languages and tools with which they are most productive and comfortable](https://www.getambassador.io/resources/kubernetes-local-dev-toolkit/). That’s the beauty of containerized development. - -However, the cloud-native, Kubernetes-based approach to development means adopting a new development workflow and development environment. Beyond the basics, such as figuring out how to containerize software, [how to run containers in Kubernetes](https://www.getambassador.io/docs/kubernetes/latest/concepts/appdev/), and how to deploy changes into containers, for example, Kubernetes adds complexity before it delivers efficiency. The promise of a “quicker way to develop software” applies at least within the traditional aspects of the inner dev loop, where the single developer codes, builds and tests their software. But both within the inner dev loop and once code is pushed into version control to trigger the outer dev loop, the developer experience changes considerably from what many developers are used to. - -In this new paradigm, new steps are added to the inner dev loop, and more broadly, the developer begins to share responsibility for the full life cycle of their software. Inevitably this means taking new workflows and tools on board to ensure that the full life cycle continues full speed ahead. 
diff --git a/docs/v2.5/concepts/faster.md b/docs/v2.5/concepts/faster.md deleted file mode 100644 index b649e415..00000000 --- a/docs/v2.5/concepts/faster.md +++ /dev/null @@ -1,25 +0,0 @@ -# Making the remote local: Faster feedback, collaboration and debugging - -With the goal of achieving [fast, efficient development](https://www.getambassador.io/use-case/local-kubernetes-development/), developers need a set of approaches to bridge the gap between remote Kubernetes clusters and local development, and reduce time to feedback and debugging. - -## How should I set up a Kubernetes development environment? - -[Setting up a development environment](https://www.getambassador.io/resources/development-environments-microservices/) for Kubernetes can be much more complex than the set up for traditional web applications. Creating and maintaining a Kubernetes development environment relies on a number of external dependencies, such as databases or authentication. - -While there are several ways to set up a Kubernetes development environment, most introduce complexities and impediments to speed. The dev environment should be set up to easily code and test in conditions where a service can access the resources it depends on. - -A good way to meet the goals of faster feedback, possibilities for collaboration, and scale in a realistic production environment is the "single service local, all other remote" environment. Developing in a fully remote environment offers some benefits, but for developers, it offers the slowest possible feedback loop. With local development in a remote environment, the developer retains considerable control while using tools like [Telepresence](../../quick-start/) to facilitate fast feedback, debugging and collaboration. - -## What is Telepresence? - -Telepresence is an open source tool that lets developers [code and test microservices locally against a remote Kubernetes cluster](../../quick-start/). Telepresence facilitates more efficient development workflows while relieving the need to worry about other service dependencies. - -## How can I get fast, efficient local development? - -The dev loop can be jump-started with the right development environment and Kubernetes development tools to support speed, efficiency and collaboration. Telepresence is designed to let Kubernetes developers code as though their laptop is in their Kubernetes cluster, enabling the service to run locally and be proxied into the remote cluster. Telepresence runs code locally and forwards requests to and from the remote Kubernetes cluster, bypassing the much slower process of waiting for a container to build, pushing it to registry, and deploying to production. - -A rapid and continuous feedback loop is essential for productivity and speed; Telepresence enables the fast, efficient feedback loop to ensure that developers can access the rapid local development loop they rely on without disrupting their own or other developers' workflows. Telepresence safely intercepts traffic from the production cluster and enables near-instant testing of code, local debugging in production, and [preview URL](../../howtos/preview-urls/) functionality to share dev environments with others for multi-user collaboration. - -Telepresence works by deploying a two-way network proxy in a pod running in a Kubernetes cluster. This pod proxies data from the Kubernetes environment (e.g., TCP connections, environment variables, volumes) to the local process. 
This proxy can intercept traffic meant for the service and reroute it to a local copy, which is ready for further (local) development. - -The intercept proxy works thanks to context propagation, which is most frequently associated with distributed tracing but also plays a key role in controllable intercepts and preview URLs. diff --git a/docs/v2.5/concepts/intercepts.md b/docs/v2.5/concepts/intercepts.md deleted file mode 100644 index 4b1e770f..00000000 --- a/docs/v2.5/concepts/intercepts.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "Types of intercepts" -description: "Short demonstration of personal vs global intercepts" ---- - -import React from 'react'; - -import Alert from '@material-ui/lab/Alert'; -import AppBar from '@material-ui/core/AppBar'; -import InterceptAnimationSVG from '@src/assets/images/intercept-animation.inline.svg' -import Paper from '@material-ui/core/Paper'; -import Tab from '@material-ui/core/Tab'; -import TabContext from '@material-ui/lab/TabContext'; -import TabList from '@material-ui/lab/TabList'; -import TabPanel from '@material-ui/lab/TabPanel'; - -export function Animation(props) { - let el = React.useRef(null); - React.useEffect(() => { - const queueAnimation = () => { - setTimeout(() => { - el.current?.getAnimations({subtree: true})?.forEach((anim) => { - anim.finish(); - anim.play(); - }) - queueAnimation(); - }, 3000); - }; - queueAnimation(); - }, el); - return ( -
- -
- ); -}; - -export function TabsContainer({ children, ...props }) { - const [state, setState] = React.useState({curTab: "personal"}); - React.useEffect(() => { - const query = new URLSearchParams(window.location.search); - var interceptType = query.get('intercept') || "personal"; - if (state.curTab != interceptType) { - setState({curTab: interceptType}); - } - }, [state, setState]) - var setURL = function(newTab) { - history.replaceState(null,null, - `?intercept=${newTab}${window.location.hash}`, - ); - }; - return ( -
- - - {setState({curTab: newTab}); setURL(newTab)}} aria-label="intercept types"> - - - - - - {children} - -
- ); -}; - -# Types of intercepts - - - - -# No intercept - - - - -This is the normal operation of your cluster without Telepresence. - - - - - -# Global intercept - - - - -**Global intercepts** replace the Kubernetes "Orders" service with the -Orders service running on your laptop. The users see no change, but -with all the traffic coming to your laptop, you can observe and debug -with all your dev tools. - - - -### Creating and using global intercepts - - 1. Creating the intercept: Intercept your service from your CLI: - - ```shell - telepresence intercept SERVICENAME --http-match=all - ``` - - - - Make sure your current kubectl context points to the target - cluster. If your service is running in a different namespace than - your current active context, use or change the `--namespace` flag. - - - - 2. Using the intercept: Send requests to your service: - - All requests will be sent to the version of your service that is - running in the local development environment. - - - - -# Personal intercept - -**Personal intercepts** allow you to be selective and intercept only -some of the traffic to a service while not interfering with the rest -of the traffic. This allows you to share a cluster with others on your -team without interfering with their work. - - - - -In the illustration above, **Orange** -requests are being made by Developer 2 on their laptop and the -**green** are made by a teammate, -Developer 1, on a different laptop. - -Each developer can intercept the Orders service for their requests only, -while sharing the rest of the development environment. - - - -### Creating and using personal intercepts - - 1. Creating the intercept: Intercept your service from your CLI: - - ```shell - telepresence intercept SERVICENAME --http-match=Personal-Intercept=126a72c7-be8b-4329-af64-768e207a184b - ``` - - We're using - `Personal-Intercept=126a72c7-be8b-4329-af64-768e207a184b` as the - header for the sake of the example, but you can use any - `key=value` pair you want, or `--http-match=auto` to have it - choose something automatically. - - - - Make sure your current kubect context points to the target - cluster. If your service is running in a different namespace than - your current active context, use or change the `--namespace` flag. - - - - 2. Using the intercept: Send requests to your service by passing the - HTTP header: - - ```http - Personal-Intercept: 126a72c7-be8b-4329-af64-768e207a184b - ``` - - - - Need a browser extension to modify or remove an HTTP-request-headers? - - Chrome - {' '} - Firefox - - - - 3. Using the intercept: Send requests to your service without the - HTTP header: - - Requests without the header will be sent to the version of your - service that is running in the cluster. This enables you to share - the cluster with a team! - -### Intercepting a specific endpoint - -It's not uncommon to have one service serving several endpoints. Telepresence is capable of limiting an -intercept to only affect the endpoints you want to work with by using one of the `--http-path-xxx` -flags below in addition to using `--http-match` flags. Only one such flag can be used in an intercept -and, contrary to the `--http-match` flag, it cannot be repeated. 
- -The following flags are available: - -| Flag | Meaning | -|-------------------------------|------------------------------------------------------------------| -| `--http-path-equal ` | Only intercept the endpoint for this exact path | -| `--http-path-prefix ` | Only intercept endpoints with a matching path prefix | -| `--http-path-regex ` | Only intercept endpoints that match the given regular expression | - - - diff --git a/docs/v2.5/doc-links.yml b/docs/v2.5/doc-links.yml deleted file mode 100644 index 246cfa50..00000000 --- a/docs/v2.5/doc-links.yml +++ /dev/null @@ -1,92 +0,0 @@ -- title: Quick start - link: quick-start -- title: Install Telepresence - items: - - title: Install - link: install/ - - title: Upgrade - link: install/upgrade/ - - title: Install Traffic Manager with Helm - link: install/helm/ - - title: Migrate from legacy Telepresence - link: install/migrate-from-legacy/ -- title: Core concepts - items: - - title: The changing development workflow - link: concepts/devworkflow - - title: The developer experience and the inner dev loop - link: concepts/devloop - - title: "Making the remote local: Faster feedback, collaboration and debugging" - link: concepts/faster - - title: Context propagation - link: concepts/context-prop - - title: Types of intercepts - link: concepts/intercepts -- title: How do I... - items: - - title: Intercept a service in your own environment - link: howtos/intercepts - - title: Share dev environments with preview URLs - link: howtos/preview-urls - - title: Proxy outbound traffic to my cluster - link: howtos/outbound - - title: Send requests to an intercepted service - link: howtos/request -- title: Telepresence for Docker - items: - - title: What is Telepresence for Docker - link: extension/intro - - title: Install into Docker-Desktop - link: extension/install - - title: Intercept into a Docker Container - link: extension/intercept - - title: Not Working? 
- link: extension/troubleshooting -- title: Technical reference - items: - - title: Architecture - link: reference/architecture - - title: Client reference - link: reference/client - items: - - title: login - link: reference/client/login - - title: Laptop-side configuration - link: reference/config - - title: Cluster-side configuration - link: reference/cluster-config - - title: Using Docker for intercepts - link: reference/docker-run - - title: Running Telepresence in a Docker container - link: reference/inside-container - - title: Environment variables - link: reference/environment - - title: Intercepts - link: reference/intercepts/ - items: - - title: Manually injecting the Traffic Agent - link: reference/intercepts/manual-agent - - title: Volume mounts - link: reference/volume - - title: RESTful API service - link: reference/restapi - - title: DNS resolution - link: reference/dns - - title: RBAC - link: reference/rbac - - title: Telepresence and VPNs - link: reference/vpn - - title: Networking through Virtual Network Interface - link: reference/tun-device - - title: Connection Routing - link: reference/routing - - title: Using Telepresence with Linkerd - link: reference/linkerd -- title: FAQs - link: faqs -- title: Troubleshooting - link: troubleshooting -- title: Community - link: community -- title: Release Notes - link: release-notes diff --git a/docs/v2.5/extension/install.md b/docs/v2.5/extension/install.md deleted file mode 100644 index dfa4b45e..00000000 --- a/docs/v2.5/extension/install.md +++ /dev/null @@ -1,13 +0,0 @@ -# Install Telepresence For Docker - -[Docker](https://docker.com) is a popular containerized runtime environment. Telepresence for Docker will allow you to easily use Telepresence with your Docker containers. - -## Install Docker - -Telepresence for Docker is available within the Docker Dashboard. [Install Docker](https://www.docker.com/get-started). - -## Install Telepresence For Docker - -Start up the Docker engine, and open the Docker Dashboard. In the left panel, click `Add Extension`. - -You will be given a screen of available extensions. Find Telepresence, and click `Install`. Telepresence should appear as a tab in the left panel. diff --git a/docs/v2.5/extension/intercept.md b/docs/v2.5/extension/intercept.md deleted file mode 100644 index 66a479dc..00000000 --- a/docs/v2.5/extension/intercept.md +++ /dev/null @@ -1,53 +0,0 @@ -# Create an Intercept - -## Prerequisites - -Before you begin, you need to have [Docker Desktop](https://www.docker.com/products/docker-desktop) and the Telepresence for Docker extension [installed](../install), as well as the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -This guide assumes you have a Kubernetes deployment with a running service, and that you can run a copy of that service in a docker container on your laptop. - -## Intercept your service with a personal intercept - -With the Telepresence for Docker extension, you can create [personal intercepts](../../concepts/intercepts/?intercept=personal) that intercept your cluster traffic that passes through a provided proxy url and routes it to your local docker container instead. - -1. Create a Copy of the Service You Want to Intercept - - Telepresence for Docker routes traffic to and from a local docker container. We need to setup a docker container to recieve this traffic. 
To do this, run - - ```console - $ docker run --rm -it --network host - ``` - - Telepresence for Docker requires the target service to be on the host network. This allows Telepresence to share a network with your container. The mounted network device redirects cluster-related traffic back into the cluster. - -2. Log in to Ambassador Cloud - - To connect Telepresence for Docker to your account, you will need an API key. - - 1. Click the "Generate API Key" button to open a browser - - 2. Log in using Google, GitHub, or GitLab - - 3. You will be taken to your API Key page. Copy the key, paste it into the API form in the Docker Dashboard, and press Login. - -3. Connect to your cluster - - 1. Use the dropdown to choose the cluster you would like to use. The chosen cluster will be set as kubectl's current context. Press Next. - - 2. Press "Connect to (your cluster)" to establish a connection. - -4. Intercept a service - - Once you are connected, Telepresence for Docker will discover services in the default namespace and list them in a table. These are the services you can intercept in this namespace. To switch namespaces, choose a different namespace from the dropdown. - - 1. Choose a service to intercept and click the "Intercept" button for that service. A popup will appear with port options. - - 2. Then choose the target port; this is the port of the service in the Docker container we set up in step one. - - 3. Choose the service port of the service you would like to intercept from the dropdown. - - 4. Press Submit, and an intercept will be created. - -5. Query the environment in which you intercepted a service and verify that your local instance is being invoked. - - All the traffic previously routed to and from your Kubernetes Service is now routed to and from your local container. Click the share button next to your intercept to open it in a browser, or to view it in Ambassador Cloud. diff --git a/docs/v2.5/extension/intro.md b/docs/v2.5/extension/intro.md deleted file mode 100644 index f1c83acb..00000000 --- a/docs/v2.5/extension/intro.md +++ /dev/null @@ -1,13 +0,0 @@ -# Telepresence for Docker - -## What is Telepresence for Docker? - -Telepresence for Docker runs entirely within containers. The Telepresence Daemons run in a container, which can be given commands using the extension UI. When Telepresence intercepts a service, it redirects cloud traffic to other containers on the Docker host network. - -## What is Telepresence for Docker good at? - -Telepresence for Docker is isolated from the user's machine; it operates entirely within the Docker runtime. Therefore, Telepresence for Docker does not require root permission on the user's machine. - -## How does Telepresence for Docker work? - -Telepresence for Docker is configured to use Docker's host network (VM network for Windows and Mac, host network on Linux). Normally, Docker containers are isolated from each other; however, containers can be configured to share a network if they are both configured to use Docker's host network. diff --git a/docs/v2.5/extension/troubleshooting.md b/docs/v2.5/extension/troubleshooting.md deleted file mode 100644 index f0cc338b..00000000 --- a/docs/v2.5/extension/troubleshooting.md +++ /dev/null @@ -1,5 +0,0 @@ -## Troubleshooting - -## My Container isn't receiving/sending cloud traffic - -On the intercepts page there is a button labeled "Targetable Containers." These are containers that Telepresence can detect that are configured with both host networking and the TUN device. 
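To make a local container show up as targetable, it needs to join Docker's host network as described above. A minimal sketch follows; the image name and port are placeholders, not values from the docs.

```shell
# Run the local copy of your service on Docker's host network so the
# Telepresence for Docker extension can detect it and route intercepted
# cluster traffic to it. "my-service:dev" and port 3000 are placeholders.
docker run --rm -it --network host my-service:dev
# When creating the intercept in the extension UI, choose this container's
# listening port (e.g. 3000) as the target port.
```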
diff --git a/docs/v2.5/faqs.md b/docs/v2.5/faqs.md deleted file mode 100644 index 08eab7a5..00000000 --- a/docs/v2.5/faqs.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: "Learn how Telepresence helps with fast development and debugging in your Kubernetes cluster." ---- - -# FAQs - -** Why Telepresence?** - -Modern microservices-based applications that are deployed into Kubernetes often consist of tens or hundreds of services. The resource constraints and number of these services means that it is often difficult to impossible to run all of this on a local development machine, which makes fast development and debugging very challenging. The fast [inner development loop](../concepts/devloop/) from previous software projects is often a distant memory for cloud developers. - -Telepresence enables you to connect your local development machine seamlessly to the cluster via a two way proxying mechanism. This enables you to code locally and run the majority of your services within a remote Kubernetes cluster -- which in the cloud means you have access to effectively unlimited resources. - -Ultimately, this empowers you to develop services locally and still test integrations with dependent services or data stores running in the remote cluster. - -You can “intercept” any requests made to a target Kubernetes workload, and code and debug your associated service locally using your favourite local IDE and in-process debugger. You can test your integrations by making requests against the remote cluster’s ingress and watching how the resulting internal traffic is handled by your service running locally. - -By using the preview URL functionality you can share access with additional developers or stakeholders to the application via an entry point associated with your intercept and locally developed service. You can make changes that are visible in near real-time to all of the participants authenticated and viewing the preview URL. All other viewers of the application entrypoint will not see the results of your changes. - -** What operating systems does Telepresence work on?** - -Telepresence currently works natively on macOS (Intel and Apple silicon), Linux, and WSL 2. Starting with v2.4.0, we are also releasing a native Windows version of Telepresence that we are considering a Developer Preview. - -** What protocols can be intercepted by Telepresence?** - -All HTTP/1.1 and HTTP/2 protocols can be intercepted. This includes: - -- REST -- JSON/XML over HTTP -- gRPC -- GraphQL - -If you need another protocol supported, please [drop us a line](https://www.getambassador.io/feedback/) to request it. - -** When using Telepresence to intercept a pod, are the Kubernetes cluster environment variables proxied to my local machine?** - -Yes, you can either set the pod's environment variables on your machine or write the variables to a file to use with Docker or another build process. Please see [the environment variable reference doc](../reference/environment) for more information. - -** When using Telepresence to intercept a pod, can the associated pod volume mounts also be mounted by my local machine?** - -Yes, please see [the volume mounts reference doc](../reference/volume/) for more information. - -** When connected to a Kubernetes cluster via Telepresence, can I access cluster-based services via their DNS name?** - -Yes. After you have successfully connected to your cluster via `telepresence connect` you will be able to access any service in your cluster via their namespace qualified DNS name. 
- -This means you can curl endpoints directly e.g. `curl .:8080/mypath`. - -If you create an intercept for a service in a namespace, you will be able to use the service name directly. - -This means if you `telepresence intercept -n `, you will be able to resolve just the `` DNS record. - -You can connect to databases or middleware running in the cluster, such as MySQL, PostgreSQL and RabbitMQ, via their service name. - -** When connected to a Kubernetes cluster via Telepresence, can I access cloud-based services and data stores via their DNS name?** - -You can connect to cloud-based data stores and services that are directly addressable within the cluster (e.g. when using an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) Service type), such as AWS RDS, Google pub-sub, or Azure SQL Database. - -** What types of ingress does Telepresence support for the preview URL functionality?** - -The preview URL functionality should work with most ingress configurations, including straightforward load balancer setups. - -Telepresence will discover/prompt during first use for this info and make its best guess at figuring this out and ask you to confirm or update this. - -** Why are my intercepts still reporting as active when they've been disconnected?** - - In certain cases, Telepresence might not have been able to communicate back with Ambassador Cloud to update the intercept's status. Worry not, they will get garbage collected after a period of time. - -** Why is my intercept associated with an "Unreported" cluster?** - - Intercepts tagged with "Unreported" clusters simply mean Ambassador Cloud was unable to associate a service instance with a known detailed service from an Edge Stack or API Gateway cluster. [Connecting your cluster to the Service Catalog](/docs/telepresence/latest/quick-start/) will properly match your services from multiple data sources. - -** Will Telepresence be able to intercept workloads running on a private cluster or cluster running within a virtual private cloud (VPC)?** - -Yes. The cluster has to have outbound access to the internet for the preview URLs to function correctly, but it doesn’t need to have a publicly accessible IP address. - -The cluster must also have access to an external registry in order to be able to download the traffic-manager and traffic-agent images that are deployed when connecting with Telepresence. - -** Why does running Telepresence require sudo access for the local daemon?** - -The local daemon needs sudo to create iptable mappings. Telepresence uses this to create outbound access from the laptop to the cluster. - -On Fedora, Telepresence also creates a virtual network device (a TUN network) for DNS routing. That also requires root access. - -** What components get installed in the cluster when running Telepresence?** - -A single `traffic-manager` service is deployed in the `ambassador` namespace within your cluster, and this manages resilient intercepts and connections between your local machine and the cluster. - -A Traffic Agent container is injected per pod that is being intercepted. The first time a workload is intercepted all pods associated with this workload will be restarted with the Traffic Agent automatically injected. 
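If you want to confirm what was installed, a couple of standard `kubectl` queries can show it. This is only a sketch: it assumes the Traffic Manager was installed into the default `ambassador` namespace mentioned above, and `<pod-name>` is a placeholder for one of your intercepted pods.

```shell
# List the Traffic Manager service and deployment in the ambassador namespace.
kubectl get svc,deploy traffic-manager -n ambassador

# Show the containers of an intercepted pod; an injected Traffic Agent
# appears as an extra "traffic-agent" container.
kubectl get pod <pod-name> -o jsonpath='{.spec.containers[*].name}'
```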
- -** How can I remove all of the Telepresence components installed within my cluster?** - -You can run the command `telepresence uninstall --everything` to remove the `traffic-manager` service installed in the cluster and `traffic-agent` containers injected into each pod being intercepted. - -Running this command will also stop the local daemon running. - -** What language is Telepresence written in?** - -All components of the Telepresence application and cluster components are written using Go. - -** How does Telepresence connect and tunnel into the Kubernetes cluster?** - -The connection between your laptop and cluster is established by using -the `kubectl port-forward` machinery (though without actually spawning -a separate program) to establish a TCP connection to Telepresence -Traffic Manager in the cluster, and running Telepresence's custom VPN -protocol over that TCP connection. - - - -** What identity providers are supported for authenticating to view a preview URL?** - -* GitHub -* GitLab -* Google - -More authentication mechanisms and identity provider support will be added soon. Please [let us know](https://www.getambassador.io/feedback/) which providers are the most important to you and your team in order for us to prioritize those. - -** Is Telepresence open source?** - -Yes it is! You can find its source code on [GitHub](https://github.com/telepresenceio/telepresence). - -** How do I share my feedback on Telepresence?** - -Your feedback is always appreciated and helps us build a product that provides as much value as possible for our community. You can chat with us directly on our [feedback page](https://www.getambassador.io/feedback/), or you can [join our Slack channel](https://a8r.io/Slack) to share your thoughts. diff --git a/docs/v2.5/howtos/intercepts.md b/docs/v2.5/howtos/intercepts.md deleted file mode 100644 index 6adebd6c..00000000 --- a/docs/v2.5/howtos/intercepts.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Intercept a service in your own environment - -Telepresence enables you to create intercepts to a target Kubernetes workload. Once you have created and intercept, you can code and debug your associated service locally. - -For a detailed walk-though on creating intercepts using our sample app, follow the [quick start guide](../../quick-start/demo-node/). - - -## Prerequisites - -Before you begin, you need to have [Telepresence installed](../../install/), and either the Kubernetes command-line tool, [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/), or the OpenShift Container Platform command-line interface, [`oc`](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). This document uses kubectl in all example commands. OpenShift users can substitute oc [commands instead](https://docs.openshift.com/container-platform/4.1/cli_reference/developer-cli-commands.html). - -This guide assumes you have a Kubernetes deployment and service accessible publicly by an ingress controller, and that you can run a copy of that service on your laptop. 
- - -## Intercept your service with a global intercept - -With Telepresence, you can create [global intercepts](../../concepts/intercepts/?intercept=global) that intercept all traffic going to a service in your cluster and route it to your local environment instead. - -1. Connect to your cluster with `telepresence connect` and connect to the Kubernetes API server: - - ```console - $ curl -ik https://kubernetes.default - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - The 401 response is expected when you first connect. - - - You now have access to your remote Kubernetes API server as if you were on the same network. You can now use any local tools to connect to any service in the cluster. - - If you have difficulties connecting, make sure you are using Telepresence 2.0.3 or a later version. Check your version by entering `telepresence version` and [upgrade if needed](../../install/upgrade/). - - -2. Enter `telepresence list` and make sure the service you want to intercept is listed. For example: - - ```console - $ telepresence list - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -3. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example: - - ```console - $ kubectl get service example-service --output yaml - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -4. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - * For `--port`: specify the port the local instance of your service is running on. If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon. - * For `--env-file`: specify a file path for Telepresence to write the environment variables that are set in the pod. - The example below shows Telepresence intercepting traffic going to service `example-service`. Requests now reach the service on port `http` in the cluster get routed to `8080` on the workstation and write the environment variables of the service to `~/example-service-intercept.env`. - ```console - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -5. Start your local environment using the environment variables retrieved in the previous step. - - The following are some examples of how to pass the environment variables to your local process: - * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file). - * **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. - * **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Query the environment in which you intercepted a service and verify your local instance being invoked. 
- All the traffic previously routed to your Kubernetes Service is now routed to your local environment - -You can now: -- Make changes on the fly and see them reflected when interacting with - your Kubernetes environment. -- Query services only exposed in your cluster's network. -- Set breakpoints in your IDE to investigate bugs. - - - - **Didn't work?** Make sure the port you're listening on matches the one you specified when you created your intercept. - - diff --git a/docs/v2.5/howtos/outbound.md b/docs/v2.5/howtos/outbound.md deleted file mode 100644 index e148023e..00000000 --- a/docs/v2.5/howtos/outbound.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: "Telepresence can connect to your Kubernetes cluster, letting you access cluster services as if your laptop was another pod in the cluster." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Proxy outbound traffic to my cluster - -While preview URLs are a powerful feature, Telepresence offers other options for proxying traffic between your laptop and the cluster. This section discribes how to proxy outbound traffic and control outbound connectivity to your cluster. - - This guide assumes that you have the quick start sample web app running in your cluster to test accessing the web-app service. You can substitute this service for any other service you are running. - -## Proxying outbound traffic - -Connecting to the cluster instead of running an intercept allows you to access cluster workloads as if your laptop was another pod in the cluster. This enables you to access other Kubernetes services using `.`. A service running on your laptop can interact with other services on the cluster by name. - -When you connect to your cluster, the background daemon on your machine runs and installs the [Traffic Manager deployment](../../reference/architecture/) into the cluster of your current `kubectl` context. The Traffic Manager handles the service proxying. - -1. Run `telepresence connect` and enter your password to run the daemon. - - ``` - $ telepresence connect - Launching Telepresence Daemon v2.3.7 (api v3) - Need root privileges to run "/usr/local/bin/telepresence daemon-foreground /home//.cache/telepresence/logs '' ''" - [sudo] password: - Connecting to traffic manager... - Connected to context default (https://) - ``` - -2. Run `telepresence status` to confirm connection to your cluster and that it is proxying traffic. - - ``` - $ telepresence status - Root Daemon: Running - Version : v2.3.7 (api 3) - Primary DNS : "" - Fallback DNS: "" - User Daemon: Running - Version : v2.3.7 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 0 total - ``` - -3. Access your service by name with `curl web-app.emojivoto:80`. Telepresence routes the request to the cluster, as if your laptop is actually running in the cluster. - - ``` - $ curl web-app.emojivoto:80 - - - - - Emoji Vote - ... - ``` - -If you terminate the client with `telepresence quit` and try to access the service again, it will fail because traffic is no longer proxied from your laptop. - - ``` - $ telepresence quit - Telepresence Daemon quitting...done - ``` - -When using Telepresence in this way, you need to access services with the namespace qualified DNS name (<service name>.<namespace>) before you start an intercept. After you start an intercept, only <service name> is required. 
Read more about these differences in the DNS resolution reference guide. - -## Controlling outbound connectivity - -By default, Telepresence provides access to all Services found in all namespaces in the connected cluster. This can lead to problems if the user does not have RBAC access permissions to all namespaces. You can use the `--mapped-namespaces ` flag to control which namespaces are accessible. - -When you use the `--mapped-namespaces` flag, you need to include all namespaces containing services you want to access, as well as all namespaces that contain services related to the intercept. - -### Using local-only intercepts - -When you develop on isolated apps or on a virtualized container, you don't need an outbound connection. However, when developing services that aren't deployed to the cluster, it can be necessary to provide outbound connectivity to the namespace where the service will be deployed. This is because services that aren't exposed through ingress controllers require connectivity to those services. When you provide outbound connectivity, the service can access other services in that namespace without using qualified names. A local-only intercept does not cause outbound connections to originate from the intercepted namespace. The reason for this is to establish correct origin; the connection must be routed to a `traffic-agent`of an intercepted pod. For local-only intercepts, the outbound connections originates from the `traffic-manager`. - -To control outbound connectivity to specific namespaces, add the `--local-only` flag: - - ``` - $ telepresence intercept --namespace --local-only - ``` -The resources in the given namespace can now be accessed using unqualified names as long as the intercept is active. -You can deactivate the intercept with `telepresence leave `. This removes unqualified name access. - -### Proxy outcound connectivity for laptops - -To specify additional hosts or subnets that should be resolved inside of the cluster, see [AlsoProxy](../../reference/config/#alsoproxy) for more details. \ No newline at end of file diff --git a/docs/v2.5/howtos/preview-urls.md b/docs/v2.5/howtos/preview-urls.md deleted file mode 100644 index 670f72dd..00000000 --- a/docs/v2.5/howtos/preview-urls.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -description: "Telepresence uses Preview URLs to help you collaborate on developing Kubernetes services with teammates." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Share development environments with preview URLs - -Telepresence can generate sharable preview URLs. This enables you to work on a copy of your service locally, and share that environment with a teammate for pair programming. While using preview URLs, Telepresence will route only the requests coming from that preview URL to your local environment. Requests to the ingress are routed to your cluster as usual. - -Preview URLs are protected behind authentication through Ambassador Cloud, and, access to the URL is only available to users in your organization. You can make the URL publicly accessible for sharing with outside collaborators. - -## Creating a preview URL - -1. Connect to Telepresence and enter the `telepresence list` command in your CLI to verify the service is listed. -Telepresence only supports Deployments, ReplicaSets, and StatefulSet workloads with a label that matches a Service. - -2. Enter `telepresence login` to launch Ambassador Cloud in your browser. 
-
-   If you are in an environment where Telepresence cannot launch in a local browser, pass the [`--apikey` flag to `telepresence login`](../../reference/client/login/).
-
-3. Start the intercept with `telepresence intercept --port --env-file ` and adjust the flags as follows:
-   * **port:** specify the port the local instance of your service is running on. If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon.
-   * **env-file:** specify a file path for Telepresence to write the environment variables that are set in the pod.
-
-4. Answer the question prompts.
-   * **What's your ingress' IP address?**: the IP address or DNS name at which your ingress controller can be reached (this is usually a `service.namespace` DNS name).
-   * **What's your ingress' TCP port number?**: the port your ingress controller is listening to. This is often 443 for TLS ports, and 80 for non-TLS ports.
-   * **Does that TCP port on your ingress use TLS (as opposed to cleartext)?**: whether the ingress controller is expecting TLS communication on the specified port.
-   * **If required by your ingress, specify a different hostname (TLS-SNI, HTTP "Host" header) to be used in requests.**: if your ingress controller routes traffic based on a domain name (often using the `Host` HTTP header), enter that value here.
-
-   The example below shows a preview URL for `example-service` which listens on port 8080. The preview URL for ingress will use the `ambassador` service in the `ambassador` namespace on port `443` using TLS encryption and the hostname `dev-environment.edgestack.me`:
-
-   ```console
-$ telepresence intercept example-service --port 8080 --env-file ~/ex-svc.env
-
-   To create a preview URL, telepresence needs to know how cluster
-   ingress works for this service. Please Confirm the ingress to use.
-
-   1/4: What's your ingress' IP address?
-        You may use an IP address or a DNS name (this is usually a
-        "service.namespace" DNS name).
-
-        [default: -]: ambassador.ambassador
-
-   2/4: What's your ingress' TCP port number?
-
-        [default: -]: 80
-
-   3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)?
-
-        [default: n]: y
-
-   4/4: If required by your ingress, specify a different hostname
-        (TLS-SNI, HTTP "Host" header) to be used in requests.
-
-        [default: ambassador.ambassador]: dev-environment.edgestack.me
-
-   Using deployment example-service
-   intercepted
-       Intercept name         : example-service
-       State                  : ACTIVE
-       Destination            : 127.0.0.1:8080
-       Service Port Identifier: http
-       Intercepting           : HTTP requests that match all of:
-         header("x-telepresence-intercept-id") ~= regexp(":example-service")
-       Preview URL            : https://.preview.edgestack.me
-       Layer 5 Hostname       : dev-environment.edgestack.me
-   ```
-
-5. Start your local environment using the environment variables retrieved in the previous step.
-
-   Here are some examples of how to pass the environment variables to your local process:
-   * **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file).
-   * **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration.
- * **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Go to the Preview URL generated from the intercept. -Traffic is now intercepted from your preview URL without impacting other traffic from your Ingress. - - - Didn't work? It might be because you have services in between your ingress controller and the service you are intercepting that do not propagate the x-telepresence-intercept-id HTTP Header. Read more on context propagation. - - -7. Make a request on the URL you would usually query for that environment. Don't route a request to your laptop. - - Normal traffic coming into the cluster through the Ingress (i.e. not coming from the preview URL) routes to services in the cluster like normal. - -8. Share with a teammate. - - You can collaborate with teammates by sending your preview URL to them. Once your teammate logs in, they must select the same identity provider and org as you are using. This authorizes their access to the preview URL. When they visit the preview URL, they see the intercepted service running on your laptop. - You can now collaborate with a teammate to debug the service on the shared intercept URL without impacting the production environment. - -## Sharing a preview URL with people outside your team - -To collaborate with someone outside of your identity provider's organization: -Log into [Ambassador Cloud](https://app.getambassador.io/cloud/). - navigate to your service's intercepts, select the preview URL details, and click **Make Publicly Accessible**. Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on your laptop. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. Removing the preview URL either from the dashboard or by running `telepresence preview remove ` also removes all access to the preview URL. - -## Change access restrictions - -To collaborate with someone outside of your identity provider's organization, you must make your preview URL publicly accessible. - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/). -2. Select the service you want to share and open the service details page. -3. Click the **Intercepts** tab and expand the preview URL details. -4. Click **Make Publicly Accessible**. - -Now anyone with the link will have access to the preview URL. When they visit the preview URL, they will see the intercepted service running on a local environment. - -To disable sharing the preview URL publicly, click **Require Authentication** in the dashboard. - -## Remove a preview URL from an Intercept - -To delete a preview URL and remove all access to the intercepted service, - -1. Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) -2. Click on the service you want to share and open the service details page. -3. Click the **Intercepts** tab and expand the preview URL details. -4. Click **Remove Preview**. - -Alternatively, you can remove a preview URL with the following command: -`telepresence preview remove ` diff --git a/docs/v2.5/howtos/request.md b/docs/v2.5/howtos/request.md deleted file mode 100644 index 1109c68d..00000000 --- a/docs/v2.5/howtos/request.md +++ /dev/null @@ -1,12 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Send requests to an intercepted service - -Ambassador Cloud can inform you about the required request parameters to reach an intercepted service. - - 1. 
Go to [Ambassador Cloud](https://app.getambassador.io/cloud/) - 2. Navigate to the desired service Intercepts page - 3. Click the **Query** button to open the pop-up menu. - 4. Toggle between **CURL**, **Headers** and **Browse**. - -The pre-built queries and header information will help you get started to query the desired intercepted service and manage header propagation. diff --git a/docs/v2.5/images/container-inner-dev-loop.png b/docs/v2.5/images/container-inner-dev-loop.png deleted file mode 100644 index 06586cd6..00000000 Binary files a/docs/v2.5/images/container-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.5/images/github-login.png b/docs/v2.5/images/github-login.png deleted file mode 100644 index cfd4d4bf..00000000 Binary files a/docs/v2.5/images/github-login.png and /dev/null differ diff --git a/docs/v2.5/images/logo.png b/docs/v2.5/images/logo.png deleted file mode 100644 index 701f63ba..00000000 Binary files a/docs/v2.5/images/logo.png and /dev/null differ diff --git a/docs/v2.5/images/split-tunnel.png b/docs/v2.5/images/split-tunnel.png deleted file mode 100644 index 5bf30378..00000000 Binary files a/docs/v2.5/images/split-tunnel.png and /dev/null differ diff --git a/docs/v2.5/images/trad-inner-dev-loop.png b/docs/v2.5/images/trad-inner-dev-loop.png deleted file mode 100644 index 618b674f..00000000 Binary files a/docs/v2.5/images/trad-inner-dev-loop.png and /dev/null differ diff --git a/docs/v2.5/images/tunnelblick.png b/docs/v2.5/images/tunnelblick.png deleted file mode 100644 index 8944d445..00000000 Binary files a/docs/v2.5/images/tunnelblick.png and /dev/null differ diff --git a/docs/v2.5/images/vpn-dns.png b/docs/v2.5/images/vpn-dns.png deleted file mode 100644 index eed535c4..00000000 Binary files a/docs/v2.5/images/vpn-dns.png and /dev/null differ diff --git a/docs/v2.5/install/helm.md b/docs/v2.5/install/helm.md deleted file mode 100644 index 688d2f20..00000000 --- a/docs/v2.5/install/helm.md +++ /dev/null @@ -1,181 +0,0 @@ -# Install with Helm - -[Helm](https://helm.sh) is a package manager for Kubernetes that automates the release and management of software on Kubernetes. The Telepresence Traffic Manager can be installed via a Helm chart with a few simple steps. - -**Note** that installing the Traffic Manager through Helm will prevent `telepresence connect` from ever upgrading it. If you wish to upgrade a Traffic Manager that was installed via the Helm chart, please see the steps [below](#upgrading-the-traffic-manager) - -For more details on what the Helm chart installs and what can be configured, see the Helm chart [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). - -## Before you begin - -The Telepresence Helm chart is hosted by Ambassador Labs and published at `https://app.getambassador.io`. - -Start by adding this repo to your Helm client with the following command: - -```shell -helm repo add datawire https://app.getambassador.io -helm repo update -``` - -## Install with Helm - -When you run the Helm chart, it installs all the components required for the Telepresence Traffic Manager. - -1. If you are installing the Telepresence Traffic Manager **for the first time on your cluster**, create the `ambassador` namespace in your cluster: - - ```shell - kubectl create namespace ambassador - ``` - -2. 
Install the Telepresence Traffic Manager with the following command: - - ```shell - helm install traffic-manager --namespace ambassador datawire/telepresence - ``` - -### Install into custom namespace - -The Helm chart supports being installed into any namespace, not necessarily `ambassador`. Simply pass a different `namespace` argument to `helm install`. -For example, if you wanted to deploy the traffic manager to the `staging` namespace: - -```bash -helm install traffic-manager --namespace staging datawire/telepresence -``` - -Note that users of Telepresence will need to configure their kubeconfig to find this installation of the Traffic Manager: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` - -See [the kubeconfig documentation](../../reference/config#manager) for more information. - -### Upgrading the Traffic Manager. - -Versions of the Traffic Manager Helm chart are coupled to the versions of the Telepresence CLI that they are intended for. -Thus, for example, if you wish to use Telepresence `v2.4.0`, you'll need to install version `v2.4.0` of the Traffic Manager Helm chart. - -Upgrading the Traffic Manager is the same as upgrading any other Helm chart; for example, if you installed the release into the `ambassador` namespace, and you just wished to upgrade it to the latest version without changing any configuration values: - -```shell -helm repo up -helm upgrade traffic-manager datawire/telepresence --reuse-values --namespace ambassador -``` - -If you want to upgrade the Traffic-Manager to a specific version, add a `--version` flag with the version number to the upgrade command. For example: `--version v2.4.1` - -## RBAC - -### Installing a namespace-scoped traffic manager - -You might not want the Traffic Manager to have permissions across the entire kubernetes cluster, or you might want to be able to install multiple traffic managers per cluster (for example, to separate them by environment). -In these cases, the traffic manager supports being installed with a namespace scope, allowing cluster administrators to limit the reach of a traffic manager's permissions. - -For example, suppose you want a Traffic Manager that only works on namespaces `dev` and `staging`. -To do this, create a `values.yaml` like the following: - -```yaml -managerRbac: - create: true - namespaced: true - namespaces: - - dev - - staging -``` - -This can then be installed via: - -```bash -helm install traffic-manager --namespace staging datawire/telepresence -f ./values.yaml -``` - -**NOTE** Do not install namespace-scoped Traffic Managers and a global Traffic Manager in the same cluster, as it could have unexpected effects. - -#### Namespace collision detection - -The Telepresence Helm chart will try to prevent namespace-scoped Traffic Managers from managing the same namespaces. -It will do this by creating a ConfigMap, called `traffic-manager-claim`, in each namespace that a given install manages. 
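If you are unsure which Traffic Manager release currently claims a given namespace, one quick way to check (a sketch, assuming the `staging` namespace used in the examples below and the `traffic-manager-claim` ConfigMap described above) is to read the Helm ownership annotations on that ConfigMap:

```shell
# Show which Helm release and release-namespace own the claim for "staging"
kubectl get configmap traffic-manager-claim --namespace staging --output yaml | grep 'meta.helm.sh'
```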
- -So, for example, suppose you install one Traffic Manager to manage namespaces `dev` and `staging`, as: - -```bash -helm install traffic-manager --namespace dev datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={dev,staging}' -``` - -You might then attempt to install another Traffic Manager to manage namespaces `staging` and `prod`: - -```bash -helm install traffic-manager --namespace prod datawire/telepresence --set 'managerRbac.namespaced=true' --set 'managerRbac.namespaces={staging,prod}' -``` - -This would fail with an error: - -``` -Error: rendered manifests contain a resource that already exists. Unable to continue with install: ConfigMap "traffic-manager-claim" in namespace "staging" exists and cannot be imported into the current release: invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-namespace" must equal "prod": current value is "dev" -``` - -To fix this error, fix the overlap either by removing `staging` from the first install, or from the second. - -#### Namespace scoped user permissions - -Optionally, you can also configure user rbac to be scoped to the same namespaces as the manager itself. -You might want to do this if you don't give your users permissions throughout the cluster, and want to make sure they only have the minimum set required to perform telepresence commands on certain namespaces. - -Continuing with the `dev` and `staging` example from the previous section, simply add the following to `values.yaml` (make sure you set the `subjects`!): - -```yaml -clientRbac: - create: true - - # These are the users or groups to which the user rbac will be bound. - # This MUST be set. - subjects: {} - # - kind: User - # name: jane - # apiGroup: rbac.authorization.k8s.io - - namespaced: true - - namespaces: - - dev - - staging -``` - -#### Namespace-scoped webhook - -If you wish to use the traffic-manager's [mutating webhook](../../reference/cluster-config#mutating-webhook) with a namespace-scoped traffic manager, you will have to ensure that each namespace has an `app.kubernetes.io/name` label that is identical to its name: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: staging - labels: - app.kubernetes.io/name: staging -``` - -You can also use `kubectl label` to add the label to an existing namespace, e.g.: - -```shell -kubectl label namespace staging app.kubernetes.io/name=staging -``` - -This is required because the mutating webhook will use the name label to find namespaces to operate on. - -**NOTE** This labelling happens automatically in kubernetes >= 1.21. - -### Installing RBAC only - -Telepresence Traffic Manager does require some [RBAC](../../reference/rbac/) for the traffic-manager deployment itself, as well as for users. -To make it easier for operators to introspect / manage RBAC separately, you can use `rbac.only=true` to -only create the rbac-related objects. -Additionally, you can use `clientRbac.create=true` and `managerRbac.create=true` to toggle which subset(s) of RBAC objects you wish to create. diff --git a/docs/v2.5/install/index.md b/docs/v2.5/install/index.md deleted file mode 100644 index 355ad2c5..00000000 --- a/docs/v2.5/install/index.md +++ /dev/null @@ -1,152 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install - -Install Telepresence by running the commands below for your OS. 
If you are not the administrator of your cluster, you will need [administrative RBAC permissions](../reference/rbac#administrating-telepresence) to install and use Telepresence in your cluster. - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## What's Next? - -Follow one of our [quick start guides](../quick-start/) to start using Telepresence, either with our sample app or in your own environment. - -## Installing nightly versions of Telepresence - -We build and publish the contents of the default branch, [release/v2](https://github.com/telepresenceio/telepresence), of Telepresence -nightly, Monday through Friday, for macOS (Intel and Apple silicon), Linux, and Windows. - -The tags are formatted like so: `vX.Y.Z-nightly-$gitShortHash`. - -`vX.Y.Z` is the most recent release of Telepresence with the patch version (Z) bumped one higher. -For example, if our last release was 2.3.4, nightly builds would start with v2.3.5, until a new -version of Telepresence is released. - -`$gitShortHash` will be the short hash of the git commit of the build. - -Use these URLs to download the most recent nightly build. 
- - - - -```shell -# Intel Macs -https://app.getambassador.io/download/tel2/darwin/amd64/nightly/telepresence - -# Apple silicon Macs -https://app.getambassador.io/download/tel2/darwin/arm64/nightly/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/nightly/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/windows/amd64/nightly/telepresence.zip -``` - - - - -## Installing older versions of Telepresence - -Use these URLs to download an older version for your OS (including older nightly builds), replacing `x.y.z` with the versions you want. - - - - -```shell -# Intel Macs -https://app.getambassador.io/download/tel2/darwin/amd64/x.y.z/telepresence - -# Apple silicon Macs -https://app.getambassador.io/download/tel2/darwin/arm64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/linux/amd64/x.y.z/telepresence -``` - - - - -``` -https://app.getambassador.io/download/tel2/windows/amd64/x.y.z/telepresence -``` - - - diff --git a/docs/v2.5/install/migrate-from-legacy.md b/docs/v2.5/install/migrate-from-legacy.md deleted file mode 100644 index 0f227f2a..00000000 --- a/docs/v2.5/install/migrate-from-legacy.md +++ /dev/null @@ -1,109 +0,0 @@ -# Migrate from legacy Telepresence - -Telepresence (formerly referenced as Telepresence 2, which is the current major version) has different mechanics and requires a different mental model from [legacy Telepresence 1](https://www.telepresence.io/docs/v1/) when working with local instances of your services. - -In legacy Telepresence, a pod running a service was swapped with a pod running the Telepresence proxy. This proxy received traffic intended for the service, and sent the traffic onward to the target workstation or laptop. We called this mechanism "swap-deployment". - -In practice, this mechanism, while simple in concept, had some challenges. Losing the connection to the cluster would leave the deployment in an inconsistent state. Swapping the pods would take time. - -Telepresence 2 introduces a [new -architecture](../../reference/architecture/) built around "intercepts" -that addresses these problems. With the new Telepresence, a sidecar -proxy ("traffic agent") is injected onto the pod. The proxy then -intercepts traffic intended for the Pod and routes it to the -workstation/laptop. The advantage of this approach is that the -service is running at all times, and no swapping is used. By using -the proxy approach, we can also do personal intercepts, where rather -than re-routing all traffic to the laptop/workstation, it only -re-routes the traffic designated as belonging to that user, so that -multiple developers can intercept the same service at the same time -without disrupting normal operation or disrupting eacho. - -Please see [the Telepresence quick start](../../quick-start/) for an introduction to running intercepts and [the intercept reference doc](../../reference/intercepts/) for a deep dive into intercepts. - -## Using legacy Telepresence commands - -First please ensure you've [installed Telepresence](../). - -Telepresence is able to translate common legacy Telepresence commands into native Telepresence commands. -So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used -to with the Telepresence binary. 
- -For example, say you have a deployment (`myserver`) that you want to swap deployment (equivalent to intercept in -Telepresence) with a python server, you could run the following command: - -``` -$ telepresence --swap-deployment myserver --expose 9090 --run python3 -m http.server 9090 -< help text > - -Legacy telepresence command used -Command roughly translates to the following in Telepresence: -telepresence intercept echo-easy --port 9090 -- python3 -m http.server 9090 -running... -Connecting to traffic manager... -Connected to context -Using Deployment myserver -intercepted - Intercept name : myserver - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:9090 - Intercepting : all TCP connections -Serving HTTP on :: port 9090 (http://[::]:9090/) ... -``` - -Telepresence will let you know what the legacy Telepresence command has mapped to and automatically -runs it. So you can get started with Telepresence today, using the commands you are used to -and it will help you learn the Telepresence syntax. - -### Legacy command mapping - -Below is the mapping of legacy Telepresence to Telepresence commands (where they exist and -are supported). - -| Legacy Telepresence Command | Telepresence Command | -|--------------------------------------------------|--------------------------------------------| -| --swap-deployment $workload | intercept $workload | -| --expose localPort[:remotePort] | intercept --port localPort[:remotePort] | -| --swap-deployment $workload --run-shell | intercept $workload -- bash | -| --swap-deployment $workload --run $cmd | intercept $workload -- $cmd | -| --swap-deployment $workload --docker-run $cmd | intercept $workload --docker-run -- $cmd | -| --run-shell | connect -- bash | -| --run $cmd | connect -- $cmd | -| --env-file,--env-json | --env-file, --env-json (haven't changed) | -| --context,--namespace | --context, --namespace (haven't changed) | -| --mount,--docker-mount | --mount, --docker-mount (haven't changed) | - -### Legacy Telepresence command limitations - -Some of the commands and flags from legacy Telepresence either didn't apply to Telepresence or -aren't yet supported in Telepresence. For some known popular commands, such as --method, -Telepresence will include output letting you know that the flag has gone away. For flags that -Telepresence can't translate yet, it will let you know that that flag is "unsupported". - -If Telepresence is missing any flags or functionality that is integral to your usage, please let us know -by [creating an issue](https://github.com/telepresenceio/telepresence/issues) and/or talking to us on our [Slack channel](https://a8r.io/Slack)! - -## Telepresence changes - -Telepresence installs a Traffic Manager in the cluster and Traffic Agents alongside workloads when performing intercepts (including -with `--swap-deployment`) and leaves them. If you use `--swap-deployment`, the intercept will be left once the process -dies, but the agent will remain. 
There's no harm in leaving the agent running alongside your service, but when you -want to remove them from the cluster, the following Telepresence command will help: -``` -$ telepresence uninstall --help -Uninstall telepresence agents and manager - -Usage: - telepresence uninstall [flags] { --agent |--all-agents | --everything } - -Flags: - -d, --agent uninstall intercept agent on specific deployments - -a, --all-agents uninstall intercept agent on all deployments - -e, --everything uninstall agents and the traffic manager - -h, --help help for uninstall - -n, --namespace string If present, the namespace scope for this CLI request -``` - -Since the new architecture deploys a Traffic Manager into the Ambassador namespace, please take a look at -our [rbac guide](../../reference/rbac) if you run into any issues with permissions while upgrading to Telepresence. diff --git a/docs/v2.5/install/telepresence-pro.md b/docs/v2.5/install/telepresence-pro.md deleted file mode 100644 index f7a86bb1..00000000 --- a/docs/v2.5/install/telepresence-pro.md +++ /dev/null @@ -1,66 +0,0 @@ -import Platform from '@src/components/Platform'; - -# Install Telepresence Pro - -Telepresence Pro is a replacement to Telepresence's User Daemon -that gives you premium features including: -* Creating intercepts on your local machine from Ambassador Cloud. - -The `telepresence-pro` binary must be installed in the same directory as -`telepresence`. When you run `telepresence login` it will automatically be -installed and placed in the correct location. If you are in an air-gapped -environment or need to install it manually, ensure it is placed in the -correct directory. - - - - -```shell -# In this example, we install the binary in `/usr/local/bin/` since that's where `telepresence` -# is installed by default -# Intel Macs -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel-pro/darwin/amd64/$dlVersion$/latest/telepresence-pro -o /usr/local/bin/telepresence-pro -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence-pro - -# Apple silicon Macs -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel-pro/darwin/arm64/$dlVersion$/latest/telepresence-pro -o /usr/local/bin/telepresence-pro -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence-pro -``` - - - - -```shell -# In this example, we install the binary in `/usr/local/bin/` since that's where `telepresence` -# is installed by default -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel-pro/linux/amd64/$dlVersion$/latest/telepresence-pro -o /usr/local/bin/telepresence-pro -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence-pro -``` - - - - -```powershell -# In this example, we install the binary in `/usr/local/bin/` since that's where `telepresence` -# is installed by default -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence-pro.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel-pro/windows/amd64/$dlVersion$/latest/telepresence-pro.exe -o telepresence-exe - -# 2. Move the exe to your path (We recommend the default directory used by telepresence `C:\telepresence`) -Copy-Item "telepresence-pro.exe" -Destination "C:\telepresence\telepresence-pro.exe" -Force -``` - - - - -# Upgrade Telepresence Pro -There are two options to upgrade Telepresence Pro. 
You can run `telepresence-pro upgrade` to get the latest -version that is compatible with the current Telepresence version you are using or you can remove the binary -and Telepresence will automatically install it next time you `telepresence login`. diff --git a/docs/v2.5/install/upgrade.md b/docs/v2.5/install/upgrade.md deleted file mode 100644 index 10d0ca13..00000000 --- a/docs/v2.5/install/upgrade.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -description: "How to upgrade your installation of Telepresence and install previous versions." ---- - -import Platform from '@src/components/Platform'; - -# Upgrade Process -The Telepresence CLI will periodically check for new versions and notify you when an upgrade is available. Running the same commands used for installation will replace your current binary with the latest version. - - - - -```shell -# Intel Macs - -# Upgrade via brew: -brew upgrade datawire/blackbird/telepresence - -# OR upgrade manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path by passing in -Path C:\my\custom\path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -After upgrading your CLI you must stop any live Telepresence processes by issuing `telepresence quit`, then upgrade the Traffic Manager by running `telepresence connect` - -**Note** that if the Traffic Manager has been installed via Helm, `telepresence connect` will never upgrade it. 
If you wish to upgrade a Traffic Manager that was installed via the Helm chart, please see the [the Helm documentation](../helm#upgrading-the-traffic-manager) diff --git a/docs/v2.5/quick-start/TelepresenceQuickStartLanding.js b/docs/v2.5/quick-start/TelepresenceQuickStartLanding.js deleted file mode 100644 index 537a6325..00000000 --- a/docs/v2.5/quick-start/TelepresenceQuickStartLanding.js +++ /dev/null @@ -1,126 +0,0 @@ -import React from 'react'; - -import Icon from '../../../src/components/Icon'; - -import './telepresence-quickstart-landing.less'; - -/** @type React.FC> */ -const RightArrow = (props) => ( - - - -); - -/** @type React.FC<{color: 'green'|'blue', withConnector: boolean}> */ -const Box = ({ children, color = 'blue', withConnector = false }) => ( - <> - {withConnector && ( -
- -
- )} -
{children}
- -); - -const TelepresenceQuickStartLanding = () => ( -
-

- Telepresence -

-

- Explore the use cases of Telepresence with a free remote Kubernetes - cluster, or dive right in using your own. -

- -
-
-
-

- Use Our Free Demo Cluster -

-

- See how Telepresence works without having to mess with your - production environments. -

-
- -

6 minutes

-

Integration Testing

-

- See how changes to a single service impact your entire application - without having to run your entire app locally. -

- - GET STARTED{' '} - - -
- -

5 minutes

-

Fast code changes

-

- Make changes to your service locally and see the results instantly, - without waiting for containers to build. -

- - GET STARTED{' '} - - -
-
-
-
-

- Use Your Cluster -

-

- Understand how Telepresence fits in to your Kubernetes development - workflow. -

-
- -

10 minutes

-

Intercept your service in your cluster

-

- Query services only exposed in your cluster's network. Make changes - and see them instantly in your K8s environment. -

- - GET STARTED{' '} - - -
-
-
- -
-

Watch the Demo

-
-
-

- See Telepresence in action in our 3-minute demo - video that you can share with your teammates. -

-
    -
  • Instant feedback loops
  • -
  • Infinite-scale development environments
  • -
  • Access to your favorite local tools
  • -
  • Easy collaborative development with teammates
  • -
-
-
- -
-
-
-
-); - -export default TelepresenceQuickStartLanding; diff --git a/docs/v2.5/quick-start/demo-node.md b/docs/v2.5/quick-start/demo-node.md deleted file mode 100644 index 5dcbd654..00000000 --- a/docs/v2.5/quick-start/demo-node.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.5/quick-start/demo-node/) diff --git a/docs/v2.5/quick-start/demo-react.md b/docs/v2.5/quick-start/demo-react.md deleted file mode 100644 index 7c7c00cc..00000000 --- a/docs/v2.5/quick-start/demo-react.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Claim a remote demo cluster and learn to use Telepresence to intercept services running in a Kubernetes Cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.5/quick-start/demo-react/) diff --git a/docs/v2.5/quick-start/go.md b/docs/v2.5/quick-start/go.md deleted file mode 100644 index bd3e1e55..00000000 --- a/docs/v2.5/quick-start/go.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -# Telepresence Quick Start - -To use a demo cluster provided by Ambassador Labs to learn how Telepresence can be used to intercept services to speed up local development and debugging, follow [this guide](https://www.getambassador.io/docs/telepresence/2.5/quick-start/go/) diff --git a/docs/v2.5/quick-start/index.md b/docs/v2.5/quick-start/index.md deleted file mode 100644 index ce68a1e1..00000000 --- a/docs/v2.5/quick-start/index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -description: "Start using Telepresence in your own environment. Follow these steps to intercept your service in your cluster." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from '../quick-start/qs-cards' - -# Telepresence Quickstart - -Telepresence is an open source tool that enables you to set up remote development environments for Kubernetes where you can still use all of your favorite local tools like IDEs, debuggers, and profilers. - -## Prerequisites - - - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/), the Kubernetes command-line tool, or the OpenShift Container Platform command-line interface, [oc](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html#cli-installing-cli_cli-developer-commands). - - A Kubernetes Deployment and Service. - - - - **Don’t have access to Kubernetes cluster?** Try Telepresence in a free remote Kubernetes cluster without having to mess with your production environment. [Get Started >](https://app.getambassador.io/cloud/welcome?select=developer&utm_source=telepresence&utm_medium=website&utm_campaign=quickstart). 
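If you don't have a workload handy, you can create a throwaway Deployment and Service to follow along with. This is only a hypothetical placeholder (the `example-service` name and `nginx` image are not required by this guide):

```shell
# Create a simple Deployment and expose it with a Service of the same name
kubectl create deployment example-service --image=nginx
kubectl expose deployment example-service --port=80 --target-port=80
```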
- - - -## Install Telepresence on Your Machine - -Install Telepresence by running the relevant commands below for your OS. If you are not the administrator of your cluster, you will need [administrative RBAC permissions](https://www.getambassador.io/docs/telepresence-oss/latest/reference/rbac/#administrating-telepresence) to install and use the Telepresence traffic-manager in your cluster. - - - - -```shell -# Intel Macs - -# 1. Download the latest binary (~105 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-amd64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - -```shell -# Apple silicon Macs - -# 1. Download the latest binary (~101 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-darwin-arm64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~95 MB): -sudo curl -fL https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-linux-amd64 -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -Installing Telepresence on Windows is easy. Download and run this [installer](https://app.getambassador.io/download/tel2oss/releases/download/$dlVersion$/telepresence-windows-amd64.exe) and follow the on-screen instructions. - - - -## Install Telepresence in Your Cluster - -1. Install the traffic manager into your cluster with `telepresence helm install`. More information about installing Telepresence can be found [here](https://www.getambassador.io/docs/telepresence-oss/latest/install/manager/). This will require root access on your machine. - -``` -$ telepresence helm install -... -Traffic Manager installed successfully -``` - -## Intercept Your Service - -With Telepresence, you can create [global intercepts](https://www.getambassador.io/docs/telepresence-oss/latest/concepts/intercepts/?intercept=global) that intercept all traffic going to a service in your remote cluster and route it to your local environment instead. - -1. Connect to your cluster with `telepresence connect` and connect to the Kubernetes API server: - - ``` - $ telepresence connect - connected to context - - ``` - - ```console - $ curl -ik https://kubernetes.default - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - - The 401 response is expected when you first connect. - - - You now have access to your remote Kubernetes API server as if you were on the same network. You can now use any local tools to connect to any service in the cluster. - -2. Enter `telepresence list` and make sure the service you want to intercept is listed. For example: - - ``` - $ telepresence list - ... - example-service: ready to intercept (traffic-agent not yet installed) - ... - ``` - -3. Get the name of the port you want to intercept on your service: - `kubectl get service --output yaml`. - - For example: - - ```console - $ kubectl get service example-service --output yaml - ... - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - ... - ``` - -4. Intercept all traffic going to the service in your cluster: - `telepresence intercept --port [:] --env-file `. - - - For `--port`: specify the port the local instance of your service is running on. 
If the intercepted service exposes multiple ports, specify the port you want to intercept after a colon. - - For `--env-file`: specify a file path for Telepresence to write the environment variables that are set in the pod. - The example below shows Telepresence intercepting traffic going to service `example-service`. Requests now reach the service on port `http` in the cluster get routed to `8080` on the workstation and write the environment variables of the service to `~/example-service-intercept.env`. - - ``` - $ telepresence intercept example-service --port 8080:http --env-file ~/example-service-intercept.env - Using Deployment example-service - intercepted - Intercept name: example-service - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Intercepting : all TCP connections - ``` - -5. Start your local environment using the environment variables retrieved in the previous step. - -The following are some examples of how to pass the environment variables to your local process: - -- **Docker:** enter `docker run` and provide the path to the file using the `--env-file` argument. For more information about Docker run commands, see the [Docker command-line reference documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file). -- **Visual Studio Code:** specify the path to the environment variables file in the `envFile` field of your configuration. -- **JetBrains IDE (IntelliJ, WebStorm, PyCharm, GoLand, etc.):** use the [EnvFile plugin](https://plugins.jetbrains.com/plugin/7861-envfile). - -6. Query the environment in which you intercepted a service and verify your local instance being invoked. - All the traffic previously routed to your Kubernetes Service is now routed to your local environment - -## 🎉 You've Unlocked a Faster Development Workflow for Kubernetes with Telepresence - -Now, with Telepresence, you can: - --
- Make changes on the fly and see them reflected when interacting with your remote Kubernetes environment. This is just like hot reloading, but it works across both local and remote environments. -
--
Query services and microservice APIs that are only accessible in your remote cluster's network.
--
Set breakpoints in your IDE and re-route remote traffic to your local machine to investigate bugs with realistic user traffic and API calls.
- - - - **Didn't work?** Make sure the port you're listening on matches the one you specified when you created your intercept. - - - -## What’s Next? -- [Learn about the Telepresence architecture.](https://www.getambassador.io/docs/telepresence-oss/latest/reference/architecture/) -- [Read the Telepresence docs.](https://www.getambassador.io/docs/telepresence-oss/) \ No newline at end of file diff --git a/docs/v2.5/quick-start/qs-cards.js b/docs/v2.5/quick-start/qs-cards.js deleted file mode 100644 index 0d8c7226..00000000 --- a/docs/v2.5/quick-start/qs-cards.js +++ /dev/null @@ -1,69 +0,0 @@ -import Grid from '@material-ui/core/Grid'; -import Paper from '@material-ui/core/Paper'; -import Typography from '@material-ui/core/Typography'; -import { makeStyles } from '@material-ui/core/styles'; -import React from 'react'; - -const useStyles = makeStyles((theme) => ({ - root: { - flexGrow: 1, - textAlign: 'center', - alignItem: 'stretch', - padding: 0, - }, - paper: { - padding: theme.spacing(1), - textAlign: 'center', - color: 'black', - height: '100%', - }, -})); - -export default function CenteredGrid() { - const classes = useStyles(); - - return ( -
- - - - - - Create a Local K8s Dev Environment - - - - Read the advanced guide on how to create your own complete Kubernetes development environment. - - - - - - - - Collaborating - - - - Use preview URLS to collaborate with your colleagues and others - outside of your organization. - - - - - - - - Outbound Sessions - - - - While connected to the cluster, your laptop can interact with - services as if it was another pod in the cluster. - - - - -
- ); -} diff --git a/docs/v2.5/quick-start/qs-go.md b/docs/v2.5/quick-start/qs-go.md deleted file mode 100644 index c2514635..00000000 --- a/docs/v2.5/quick-start/qs-go.md +++ /dev/null @@ -1,399 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Go** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Go application](#3-install-a-sample-go-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Go application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Go. We have versions in Python (Flask), Python (FastAPI), Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-go/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-go.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-go.git - - Cloning into 'edgey-corp-go'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-go/DataProcessingService/` - -3. 
You will use [Fresh](https://pkg.go.dev/github.com/BUGLAN/fresh) to support auto reloading of the Go server, which we'll use later. Confirm it is installed by running: - `go get github.com/pilu/fresh` - Then start the Go server: - `$GOPATH/bin/fresh` - - ``` - $ go get github.com/pilu/fresh - - $ $GOPATH/bin/fresh - - ... - 10:23:41 app | Welcome to the DataProcessingGoService! - ``` - - - Install Go from here and set your GOPATH if needed. - - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Go server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Go server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-go/DataProcessingService/main.go` in your editor and change `var color string` from `blue` to `orange`. Save the file and the Go server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, push to registry, and deploy. -
- With Telepresence, these changes happen instantly. -
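-
-For reference, the handler behind that edit is plain Go. A minimal sketch of a `/color` endpoint backed by a package-level variable (hypothetical names; the real `edgey-corp-go` code differs) looks roughly like this:
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-)
-
-// color is what /color returns; the step above changes a variable like
-// this from "blue" to "orange" and lets Fresh reload the server.
-var color = "blue"
-
-func main() {
-	http.HandleFunc("/color", func(w http.ResponseWriter, r *http.Request) {
-		// The frontend expects a quoted JSON string, e.g. "blue".
-		fmt.Fprintf(w, "%q", color)
-	})
-	// Listen on the port the intercept forwards to (--port 3000).
-	log.Fatal(http.ListenAndServe(":3000", nil))
-}
-```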
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## Create a complete development environment using this demo application - -Apply what you've learned from this guide and employ the Emojivoto application in your own local development environment. See the [Creating a local Kubernetes development environment](../../install/qs-go-advanced/) page to learn more. - -## What's Next? - - diff --git a/docs/v2.5/quick-start/qs-java.md b/docs/v2.5/quick-start/qs-java.md deleted file mode 100644 index 26b60de1..00000000 --- a/docs/v2.5/quick-start/qs-java.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." 
---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Java** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Java application](#3-install-a-sample-java-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Java application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Java. We have versions in Python (FastAPI), Python (Flask), Go, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-java/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-java.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-java.git - - Cloning into 'edgey-corp-java'... - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-java/DataProcessingService/` - -3. Start the Maven server. - `mvn spring-boot:run` - - - Install Java and Maven first if needed. 
- - - ``` - $ mvn spring-boot:run - - ... - g.d.DataProcessingServiceJavaApplication : Started DataProcessingServiceJavaApplication in 1.408 seconds (JVM running for 1.684) - - ``` - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Java server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Java server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-java/DataProcessingService/src/main/resources/application.properties` in your editor and change `app.default.color` on line 2 from `blue` to `orange`. Save the file then stop and restart your Java server. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a redeploy. 
- With Telepresence, these changes happen instantly. -
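-
-Since `telepresence connect` makes the cluster's DNS names resolvable from your laptop, you can confirm the intercept from any local program, not just the browser. A rough sketch of such a check, written in Go purely for illustration and assuming the DataProcessingService's cluster Service exposes port 3000 like the local copy does:
-
-```go
-package main
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-)
-
-func main() {
-	// While the intercept is active, this cluster-internal name is answered
-	// by the process on your laptop, so the body should reflect your edit.
-	resp, err := http.Get("http://dataprocessingservice.default:3000/color")
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("%s -> %s\n", resp.Request.URL, body)
-}
-```
-
-Run it with the intercept active and again after `telepresence leave dataprocessingservice`; the reported color should flip between your local value and the cluster's.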
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.5/quick-start/qs-node.md b/docs/v2.5/quick-start/qs-node.md deleted file mode 100644 index 3280f10a..00000000 --- a/docs/v2.5/quick-start/qs-node.md +++ /dev/null @@ -1,383 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Node.js** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Node.js application](#3-install-a-sample-nodejs-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Node.js application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Node.js. We have versions in Go, Java,Python using Flask, and Python using FastAPI if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-nodejs/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-nodejs.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-nodejs.git - - Cloning into 'edgey-corp-nodejs'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-nodejs/DataProcessingService/` - -3. Install the dependencies and start the Node server: -`npm install && npm start` - - ``` - $ npm install && npm start - - ... - Welcome to the DataProcessingService! - { _: [] } - Server running on port 3000 - ``` - - - Install Node.js from here if needed. - - -4. 
In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local Node server is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - See this doc for more information on how Telepresence resolves DNS. - - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Node server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-nodejs/DataProcessingService/app.js` in your editor and change line 6 from `blue` to `orange`. Save the file and the Node server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a redeploy. 
- With Telepresence, these changes happen instantly. -
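-
-The preview URL you create in the next section works by switching from this "all TCP connections" intercept to a *personal* intercept: only requests carrying the per-intercept `x-telepresence-intercept-id` header (you can see it in the intercept output below) are rerouted to your laptop, and traffic arriving through the preview URL is tagged with that header for you. A hedged sketch of exercising both paths by hand, where the header value is a placeholder for the one your own intercept prints and the port-3000 Service assumption mirrors the local server:
-
-```go
-package main
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-)
-
-// colorOf fetches /color from the in-cluster service, optionally tagging the
-// request so a personal intercept reroutes it to the local workstation.
-func colorOf(interceptID string) string {
-	req, err := http.NewRequest(http.MethodGet,
-		"http://dataprocessingservice.default:3000/color", nil)
-	if err != nil {
-		panic(err)
-	}
-	if interceptID != "" {
-		// Placeholder value; copy it from the `telepresence intercept` output.
-		req.Header.Set("x-telepresence-intercept-id", interceptID)
-	}
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-	body, _ := io.ReadAll(resp.Body)
-	return string(body)
-}
-
-func main() {
-	fmt.Println("no header:  ", colorOf(""))                    // cluster copy
-	fmt.Println("with header:", colorOf("<id-from-intercept>")) // your laptop
-}
-```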
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.5/quick-start/qs-python-fastapi.md b/docs/v2.5/quick-start/qs-python-fastapi.md deleted file mode 100644 index 3360261e..00000000 --- a/docs/v2.5/quick-start/qs-python-fastapi.md +++ /dev/null @@ -1,380 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (FastAPI)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - ... - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the FastAPI framework. We have versions in Python (Flask), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python-fastapi/main/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python-fastapi.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python-fastapi.git - - Cloning into 'edgey-corp-python-fastapi'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python-fastapi/DataProcessingService/` - -3. Install the dependencies and start the Python server. 
-Python 2.x: `pip install fastapi uvicorn requests && python app.py` -Python 3.x: `pip3 install fastapi uvicorn requests && python3 app.py` - - ``` - $ pip install fastapi uvicorn requests && python app.py - - Collecting fastapi - ... - Application startup complete. - - ``` - - Install Python from here if needed. - -4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue: -`curl localhost:3000/color` - - ``` - $ curl localhost:3000/color - - "blue" - ``` - - - Victory, your local service is running a-ok! - - -## 5. Intercept all traffic to the service -Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead: - -1. Start the intercept with the `intercept` command, setting the service name and port: -`telepresence intercept dataprocessingservice --port 3000` - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - Using Deployment dataprocessingservice - intercepted - Intercept name: dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : all TCP connections - ``` - -2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app. - - - The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop! - - -## 6. Make a code change -We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes. - -1. Open `edgey-corp-python-fastapi/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 17 from `blue` to `orange`. Save the file and the Python server will auto reload. - -2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application. - - - We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. -
- Normally, this process would require a container build, a push to a registry, and a redeploy. 
- With Telepresence, these changes happen instantly. -
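-
-Because the service behind the intercept is just a local process, the feedback loop itself can be scripted. A small sketch (Go used only for illustration) that polls the local server until the auto-reload has picked up the `DEFAULT_COLOR` edit you just made:
-
-```go
-package main
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-	"strings"
-	"time"
-)
-
-func main() {
-	const want = "orange" // the value DEFAULT_COLOR was changed to
-	for i := 0; i < 20; i++ {
-		if resp, err := http.Get("http://localhost:3000/color"); err == nil {
-			body, _ := io.ReadAll(resp.Body)
-			resp.Body.Close()
-			if strings.Contains(string(body), want) {
-				fmt.Println("reload picked up the change:", string(body))
-				return
-			}
-			fmt.Println("still serving:", string(body))
-		}
-		time.Sleep(500 * time.Millisecond)
-	}
-	fmt.Println("gave up waiting for", want)
-}
-```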
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080) and it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? - - diff --git a/docs/v2.5/quick-start/qs-python.md b/docs/v2.5/quick-start/qs-python.md deleted file mode 100644 index 481487c7..00000000 --- a/docs/v2.5/quick-start/qs-python.md +++ /dev/null @@ -1,391 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; -import Platform from '@src/components/Platform'; -import QSCards from './qs-cards' - - - -# Telepresence Quick Start - **Python (Flask)** - -
-

Contents

- -* [Prerequisites](#prerequisites) -* [1. Install the Telepresence CLI](#1-install-the-telepresence-cli) -* [2. Test Telepresence](#2-test-telepresence) -* [3. Install a sample Python application](#3-install-a-sample-python-application) -* [4. Set up a local development environment](#4-set-up-a-local-development-environment) -* [5. Intercept all traffic to the service](#5-intercept-all-traffic-to-the-service) -* [6. Make a code change](#6-make-a-code-change) -* [7. Create a Preview URL](#7-create-a-preview-url) -* [What's next?](#img-classos-logo-srcimageslogopng-whats-next) - -
- -## Prerequisites - -You’ll need [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or `oc` installed -and set up -([Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#verify-kubectl-configuration) / - [macOS](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#verify-kubectl-configuration) / - [Windows](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/#verify-kubectl-configuration)) -to use a Kubernetes cluster, preferably an empty test cluster. This -document uses `kubectl` in all example commands, but OpenShift -users should have no problem substituting in the `oc` command instead. - - - Need a cluster? We provide free demo clusters preconfigured to follow this quick start. Switch over to that version of the guide here. - - -If you have used Telepresence previously, please first reset your Telepresence deployment with: -`telepresence uninstall --everything`. - -## 1. Install the Telepresence CLI - - - - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/$dlVersion$/telepresence -o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence -``` - - - - -```powershell -# Windows is in Developer Preview, here is how you can install it: -# Make sure you run the following from Powershell as Administrator -# 1. Download the latest windows zip containing telepresence.exe and its dependencies (~50 MB): -curl -fL https://app.getambassador.io/download/tel2/windows/amd64/$dlVersion$/telepresence.zip -o telepresence.zip - -# 2. Unzip the zip file to a suitable directory + cleanup zip -Expand-Archive -Path telepresence.zip -Remove-Item 'telepresence.zip' -cd telepresence - -# 3. Run the install-telepresence.ps1 to install telepresence's dependencies. It will install telepresence to -# C:\telepresence by default, but you can specify a custom path $path with -Path $path -Set-ExecutionPolicy Bypass -Scope Process -.\install-telepresence.ps1 - -# 4. Remove the unzipped directory -cd .. -Remove-Item telepresence -# 5. Close your current Powershell and open a new one. Telepresence should now be usable as telepresence.exe -``` - - - - -## 2. Test Telepresence - -Telepresence connects your local workstation to a remote Kubernetes cluster. - -1. Connect to the cluster: -`telepresence connect` - - ``` - $ telepresence connect - - Launching Telepresence Daemon - ... - Connected to context default (https://) - ``` - - - macOS users: If you receive an error when running Telepresence that the developer cannot be verified, open -
- System Preferences → Security & Privacy → General. -
- Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence connect command. -
- -2. Test that Telepresence is working properly by connecting to the Kubernetes API server: -`curl -ik https://kubernetes.default` - - Didn't work? Make sure you are using Telepresence 2.0.3 or greater, check with telepresence version and upgrade here if needed. - - ``` - $ curl -ik https://kubernetes.default - - HTTP/1.1 401 Unauthorized - Cache-Control: no-cache, private - Content-Type: application/json - Www-Authenticate: Basic realm="kubernetes-master" - Date: Tue, 09 Feb 2021 23:21:51 GMT - Content-Length: 165 - - { - "kind": "Status", - "apiVersion": "v1", - "metadata": { - - }, - "status": "Failure", - "message": "Unauthorized", - "reason": "Unauthorized", - "code": 401 - }% - - ``` - - The 401 response is expected. What's important is that you were able to contact the API. - - - - Congratulations! You’ve just accessed your remote Kubernetes API server, as if you were on the same network! With Telepresence, you’re able to use any tool that you have locally to connect to any service in the cluster. - - -## 3. Install a sample Python application - -Your local workstation may not have the compute or memory resources necessary to run all the services in a multi-service application. In this example, we’ll show you how Telepresence can give you a fast development loop, even in this situation. - - - While Telepresence works with any language, this guide uses a sample app written in Python using the Flask framework. We have versions in Python (FastAPI), Go, Java, and NodeJS if you prefer. - - -1. Start by installing a sample application that consists of multiple services: -`kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml` - - ``` - $ kubectl apply -f https://raw.githubusercontent.com/datawire/edgey-corp-python/master/k8s-config/edgey-corp-web-app-no-mapping.yaml - - deployment.apps/dataprocessingservice created - service/dataprocessingservice created - ... - - ``` - -2. Give your cluster a few moments to deploy the sample application. - - Use `kubectl get pods` to check the status of your pods: - - ``` - $ kubectl get pods - - NAME READY STATUS RESTARTS AGE - verylargedatastore-855c8b8789-z8nhs 1/1 Running 0 78s - verylargejavaservice-7dfddbc95c-696br 1/1 Running 0 78s - dataprocessingservice-5f6bfdcf7b-qvd27 1/1 Running 0 79s - ``` - -3. Once all the pods are in a `Running` state, go to the frontend service in your browser at [http://verylargejavaservice.default:8080](http://verylargejavaservice.default:8080). - -4. You should see the EdgyCorp WebApp with a green title and green pod in the diagram. - - - Congratulations, you can now access services running in your cluster by name from your laptop! - - -## 4. Set up a local development environment -You will now download the repo containing the services' code and run the DataProcessingService service locally. This version of the code has the UI color set to blue instead of green. - - - Confirm first that nothing is running locally on port 3000! If curl localhost:3000 returns Connection refused then you should be good to go. - - -1. Clone the web app’s GitHub repo: -`git clone https://github.com/datawire/edgey-corp-python.git` - - ``` - $ git clone https://github.com/datawire/edgey-corp-python.git - - Cloning into 'edgey-corp-python'... - remote: Enumerating objects: 441, done. - ... - ``` - -2. Change into the repo directory, then into DataProcessingService: -`cd edgey-corp-python/DataProcessingService/` - -3. 
Install the dependencies and start the Python server.
-Python 2.x: `pip install flask requests && python app.py`
-Python 3.x: `pip3 install flask requests && python3 app.py`
-
-   ```
-   $ pip install flask requests && python app.py
-
-   Collecting flask
-   ...
-   Welcome to the DataServiceProcessingPythonService!
-   ...
-
-   ```
-
-   Install Python from here if needed.
-
-4. In a **new terminal window**, curl the service running locally to confirm it’s set to blue:
-`curl localhost:3000/color`
-
-   ```
-   $ curl localhost:3000/color
-
-   "blue"
-   ```
-
-   
-   Victory, your local Python server is running a-ok!
-   
-
-## 5. Intercept all traffic to the service
-Next, we’ll create an intercept. An intercept is a rule that tells Telepresence where to send traffic. In this example, we will send all traffic destined for the DataProcessingService to the version of the DataProcessingService running locally instead:
-
-1. Start the intercept with the `intercept` command, setting the service name and port:
-`telepresence intercept dataprocessingservice --port 3000`
-
-   ```
-   $ telepresence intercept dataprocessingservice --port 3000
-
-   Using Deployment dataprocessingservice
-      intercepted
-       Intercept name: dataprocessingservice
-       State         : ACTIVE
-       Workload kind : Deployment
-       Destination   : 127.0.0.1:3000
-       Intercepting  : all TCP connections
-   ```
-
-2. Go to the frontend service again in your browser. Since the service is now intercepted it can be reached directly by its service name at [http://verylargejavaservice:8080](http://verylargejavaservice:8080). You will now see the blue elements in the app.
-
-   
-   The frontend’s request to DataProcessingService is being intercepted and rerouted to the Python server on your laptop!
-   
-
-## 6. Make a code change
-We’ve now set up a local development environment for the DataProcessingService, and we’ve created an intercept that sends traffic in the cluster to our local environment. We can now combine these two concepts to show how we can quickly make and test changes.
-
-1. Open `edgey-corp-python/DataProcessingService/app.py` in your editor and change `DEFAULT_COLOR` on line 15 from `blue` to `orange`. Save the file and the Python server will auto reload.
-
-2. Now, visit [http://verylargejavaservice:8080](http://verylargejavaservice:8080) again in your browser. You will now see the orange elements in the application.
-
-   
-   We’ve just shown how we can edit code locally, and immediately see these changes in the cluster. 
- Normally, this process would require a container build, a push to a registry, and a redeploy. 
- With Telepresence, these changes happen instantly. -
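-
-Section 4 above asked you to confirm that nothing was already listening on port 3000 before starting the local Flask server. If you prefer a scripted check over `curl`, a tiny sketch (any language works; Go shown here) is to try binding the port yourself:
-
-```go
-package main
-
-import (
-	"fmt"
-	"net"
-	"os"
-)
-
-func main() {
-	// If the bind fails, something else already owns port 3000.
-	ln, err := net.Listen("tcp", "localhost:3000")
-	if err != nil {
-		fmt.Fprintln(os.Stderr, "port 3000 is busy:", err)
-		os.Exit(1)
-	}
-	ln.Close() // release it so the Flask server can bind it
-	fmt.Println("port 3000 is free")
-}
-```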
- -## 7. Create a Preview URL - -Create a personal intercept with a preview URL; meaning that only -traffic coming from the preview URL will be intercepted, so you can -easily share the services you’re working on with your teammates. - -1. Clean up your previous intercept by removing it: -`telepresence leave dataprocessingservice` - -2. Log in to Ambassador Cloud, a web interface for managing and - sharing preview URLs: - - ```console - $ telepresence login - Launching browser authentication flow... - - Login successful. - ``` - - If you are in an environment where Telepresence cannot launch a - local browser for you to interact with, you will need to pass the - [`--apikey` flag to `telepresence - login`](../../reference/client/login/). - -3. Start the intercept again: -`telepresence intercept dataprocessingservice --port 3000` - You will be asked for your ingress layer 3 address; specify the front end service: `verylargejavaservice.default` - Then when asked for the port, type `8080`, for "use TLS", type `n` and finally confirm the layer 5 hostname. - - ``` - $ telepresence intercept dataprocessingservice --port 3000 - - To create a preview URL, telepresence needs to know how requests enter - your cluster. Please Select the ingress to use. - - 1/4: What's your ingress' IP address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [default: dataprocessingservice.default]: verylargejavaservice.default - - 2/4: What's your ingress' TCP port number? - - [default: 80]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different hostname - (TLS-SNI, HTTP "Host" header) to be used in requests. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - -4. Wait a moment for the intercept to start; it will also output a preview URL. Go to this URL in your browser, it will be the orange version of the app. - -5. Now go again to [http://verylargejavaservice:8080](http://verylargejavaservice:8080), it’s still green. - -Normal traffic coming to your app gets the green cluster service, but traffic coming from the preview URL goes to your laptop and gets the orange local service! - - - The Preview URL now shows exactly what is running on your local laptop -- in a way that can be securely shared with anyone you work with. - - -## What's Next? 
- - diff --git a/docs/v2.5/quick-start/telepresence-quickstart-landing.less b/docs/v2.5/quick-start/telepresence-quickstart-landing.less deleted file mode 100644 index 37304255..00000000 --- a/docs/v2.5/quick-start/telepresence-quickstart-landing.less +++ /dev/null @@ -1,161 +0,0 @@ -@import '~@src/components/Layout/vars.less'; - -.telepresence-quickstart-landing { - font-family: @InterFont; - color: @black; - margin: 0 auto 140px; - max-width: @docs-max-width; - min-width: @docs-min-width; - - h1 { - color: @blue-dark; - font-weight: normal; - letter-spacing: 0.25px; - font-size: 33px; - } - p { - font-size: 0.875rem; - line-height: 24px; - margin: 0; - padding: 0; - } - - .demo-cluster-container { - display: grid; - margin: 40px 0; - grid-template-columns: repeat(2, 1fr); - column-gap: 40px; - @media screen and (max-width: 720px) { - grid-template-columns: repeat(1, 1fr); - row-gap: 50px; - } - } - .main-title-container { - display: flex; - flex-direction: column; - align-items: center; - p { - text-align: center; - font-size: 0.875rem; - } - } - h2.title { - font-size: 1.5rem; - color: @black; - font-weight: normal; - margin: 0 0 10px 0; - padding: 0; - &.underlined { - padding-bottom: 2px; - border-bottom: 3px solid @grey-separator; - text-align: center; - } - strong { - font-weight: 600; - } - } - .reading-time { - color: #7c7c87; - margin: 0; - } - .get-started { - font-size: 0.875rem; - font-weight: 600; - letter-spacing: 1.25px; - display: flex; - align-items: center; - margin: 20px 20px 10px; - text-decoration: none; - &.green { - color: @green; - } - &.green:hover { - color: @green-dark; - } - &.blue { - color: @blue; - } - &.blue:hover { - color: @blue-dark; - } - } - - .box-container { - border: 1.5px solid @grey-separator; - border-radius: 5px; - padding: 10px; - position: relative; - &::before { - content: ''; - position: absolute; - width: 14px; - height: 14px; - border-radius: 50%; - top: 0; - left: 50%; - transform: translate(-50%, -50%); - } - &.green::before { - background: @green; - box-shadow: 0 0 0 5px #00c05b45; - } - &.blue::before { - background: @blue; - box-shadow: 0 0 0 5px #0066ff45; - } - p { - font-size: 0.875rem; - line-height: 24px; - padding: 0; - } - } - .connector-container { - display: flex; - justify-content: center; - span { - background: @grey-separator; - width: 1.5px; - height: 37px; - } - } - - .telepresence-video { - border: 2px solid @grey-separator; - box-shadow: -6px 12px 0px fade(@black, 12%); - border-radius: 8px; - padding: 20px; - h2.telepresence-video-title { - padding: 0; - margin: 0; - } - - strong { - color: @blue; - } - } - - .video-section { - display: grid; - grid-template-columns: 1fr 2fr; - column-gap: 10px; - @media screen and (max-width: 1400px) { - grid-template-columns: 1fr; - } - ul { - font-size: 14px; - margin: 0 10px 6px 0; - } - .video-container { - position: relative; - padding-bottom: 56.25%; // 16:9 aspect ratio - height: 0; - iframe { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - } - } - } -} diff --git a/docs/v2.5/redirects.yml b/docs/v2.5/redirects.yml deleted file mode 100644 index 5961b347..00000000 --- a/docs/v2.5/redirects.yml +++ /dev/null @@ -1 +0,0 @@ -- {from: "", to: "quick-start"} diff --git a/docs/v2.5/reference/architecture.md b/docs/v2.5/reference/architecture.md deleted file mode 100644 index daa208d8..00000000 --- a/docs/v2.5/reference/architecture.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: "How Telepresence works to intercept traffic from your Kubernetes 
cluster to code running on your laptop." ---- - -# Telepresence Architecture - -
- -![Telepresence Architecture](../../../../../images/documentation/telepresence-architecture.inline.svg) - -
- -## Telepresence CLI - -The Telepresence CLI orchestrates all the moving parts: it starts the Telepresence Daemon, installs the Traffic Manager -in your cluster, authenticates against Ambassador Cloud and configure all those elements to communicate with one -another. - -## Telepresence Daemon - -The Telepresence Daemon runs on a developer's workstation and is its main point of communication with the cluster's -network. All requests from and to the cluster go through the Daemon, which communicates with the Traffic Manager. - -When you run telepresence login, Telepresence installs an enhanced Telepresence Daemon. This replaces the open source -User Daemon and allows you to create intercepts on your local machine from Ambassador Cloud. - -## Traffic Manager - -The Traffic Manager is the central point of communication between Traffic Agents in the cluster and Telepresence Daemons -on developer workstations, proxying all relevant inbound and outbound traffic and tracking active intercepts. When -Telepresence is run with either the `connect`, `intercept`, or `list` commands, the Telepresence CLI first checks the -cluster for the Traffic Manager deployment, and if missing it creates it. - -When an intercept gets created with a Preview URL, the Traffic Manager will establish a connection with Ambassador Cloud -so that Preview URL requests can be routed to the cluster. This allows Ambassador Cloud to reach the Traffic Manager -without requiring the Traffic Manager to be publicly exposed. Once the Traffic Manager receives a request from a Preview -URL, it forwards the request to the ingress service specified at the Preview URL creation. - -## Traffic Agent - -The Traffic Agent is a sidecar container that facilitates intercepts. When an intercept is started, the Traffic Agent -container is injected into the workload's pod(s). You can see the Traffic Agent's status by running `kubectl describe pod `. - -Depending on the type of intercept that gets created, the Traffic Agent will either route the incoming request to the -Traffic Manager so that it gets routed to a developer's workstation, or it will pass it along to the container in the -pod usually handling requests on that port. - -## Ambassador Cloud - -Ambassador Cloud enables Preview URLs by generating random ephemeral domain names and routing requests received on those -domains from authorized users to the appropriate Traffic Manager. - -Ambassador Cloud also lets users manage their Preview URLs: making them publicly accessible, seeing users who have -accessed them and deleting them. - -# Changes from Service Preview - -Using Ambassador's previous offering, Service Preview, the Traffic Agent had to be manually added to a pod by an -annotation. This is no longer required as the Traffic Agent is automatically injected when an intercept is started. - -Service Preview also started an intercept via `edgectl intercept`. The `edgectl` CLI is no longer required to intercept -as this functionality has been moved to the Telepresence CLI. - -For both the Traffic Manager and Traffic Agents, configuring Kubernetes ClusterRoles and ClusterRoleBindings is not -required as it was in Service Preview. Instead, the user running Telepresence must already have sufficient permissions in the cluster to add and modify deployments in the cluster. 
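As noted in the Traffic Agent section above, the agent is just another container in the intercepted pod, so standard `kubectl` commands can confirm that it was injected. A minimal check might look like this (the pod and container names below are illustrative; `echo-container` matches the manual-injection example later in this reference):

```console
$ kubectl get pod <pod-name> -o jsonpath='{.spec.containers[*].name}'
echo-container traffic-agent
```

If `traffic-agent` is missing from the list, the sidecar has not been injected into that workload yet.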
diff --git a/docs/v2.5/reference/client.md b/docs/v2.5/reference/client.md deleted file mode 100644 index 491dbbb8..00000000 --- a/docs/v2.5/reference/client.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -description: "CLI options for Telepresence to intercept traffic from your Kubernetes cluster to code running on your laptop." ---- - -# Client reference - -The [Telepresence CLI client](../../quick-start) is used to connect Telepresence to your cluster, start and stop intercepts, and create preview URLs. All commands are run in the form of `telepresence `. - -## Commands - -A list of all CLI commands and flags is available by running `telepresence help`, but here is more detail on the most common ones. -You can append `--help` to each command below to get even more information about its usage. - -| Command | Description | -| --- | --- | -| `connect` | Starts the local daemon and connects Telepresence to your cluster and installs the Traffic Manager if it is missing. After connecting, outbound traffic is routed to the cluster so that you can interact with services as if your laptop was another pod (for example, curling a service by it's name) | -| [`login`](login) | Authenticates you to Ambassador Cloud to create, manage, and share [preview URLs](../../howtos/preview-urls/) -| `logout` | Logs out out of Ambassador Cloud | -| `license` | Formats a license from Ambassdor Cloud into a secret that can be [applied to your cluster](../cluster-config#add-license-to-cluster) if you require features of the extension in an air-gapped environment| -| `status` | Shows the current connectivity status | -| `quit` | Tell Telepresence daemons to quit | -| `list` | Lists the current active intercepts | -| `intercept` | Intercepts a service, run followed by the service name to be intercepted and what port to proxy to your laptop: `telepresence intercept --port `. This command can also start a process so you can run a local instance of the service you are intercepting. For example the following will intercept the hello service on port 8000 and start a Python web server: `telepresence intercept hello --port 8000 -- python3 -m http.server 8000`. A special flag `--docker-run` can be used to run the local instance [in a docker container](../docker-run). | -| `leave` | Stops an active intercept: `telepresence leave hello` | -| `preview` | Create or remove [preview URLs](../../howtos/preview-urls) for existing intercepts: `telepresence preview create ` | -| `loglevel` | Temporarily change the log-level of the traffic-manager, traffic-agents, and user and root daemons | -| `gather-logs` | Gather logs from traffic-manager, traffic-agents, user, and root daemons, and export them into a zip file that can be shared with others or included with a github issue. Use `--get-pod-yaml` to include the yaml for the `traffic-manager` and `traffic-agent`s. Use `--anonymize` to replace the actual pod names + namespaces used for the `traffic-manager` and pods containing `traffic-agent`s in the logs. | -| `version` | Show version of Telepresence CLI + Traffic-Manager (if connected) | -| `uninstall` | Uninstalls Telepresence from your cluster, using the `--agent` flag to target the Traffic Agent for a specific workload, the `--all-agents` flag to remove all Traffic Agents from all workloads, or the `--everything` flag to remove all Traffic Agents and the Traffic Manager. 
-| `dashboard` | Reopens the Ambassador Cloud dashboard in your browser | -| `current-cluster-id` | Get cluster ID for your kubernetes cluster, used for [configuring license](../cluster-config#add-license-to-cluster) in an air-gapped environment | diff --git a/docs/v2.5/reference/client/login.md b/docs/v2.5/reference/client/login.md deleted file mode 100644 index 78335197..00000000 --- a/docs/v2.5/reference/client/login.md +++ /dev/null @@ -1,61 +0,0 @@ -# Telepresence Login - -```console -$ telepresence login --help -Authenticate to Ambassador Cloud - -Usage: - telepresence login [flags] - -Flags: - --apikey string Static API key to use instead of performing an interactive login -``` - -## Description - -Use `telepresence login` to explicitly authenticate with [Ambassador -Cloud](https://www.getambassador.io/docs/cloud). Unless the -[`skipLogin` option](../../config) is set, other commands will -automatically invoke the `telepresence login` interactive login -procedure as nescessary, so it is rarely nescessary to explicitly run -`telepresence login`; it should only be truly nescessary to explictly -run `telepresence login` when you require a non-interactive login. - -The normal interactive login procedure involves launching a web -browser, a user interacting with that web browser, and finally having -the web browser make callbacks to the local Telepresence process. If -it is not possible to do this (perhaps you are using a headless remote -box via SSH, or are using Telepresence in CI), then you may instead -have Ambassador Cloud issue an API key that you pass to `telepresence -login` with the `--apikey` flag. - -## Telepresence - -When you run `telepresence login`, the CLI installs -a Telepresence binary. The Telepresence enhanced free client of the [User -Daemon](../../architecture) communicates with the Ambassador Cloud to -provide fremium features including the ability to create intercepts from -Ambassador Cloud. - -## Acquiring an API key - -1. Log in to Ambassador Cloud at https://app.getambassador.io/ . - -2. Click on your profile icon in the upper-left: ![Screenshot with the - mouse pointer over the upper-left profile icon](./apikey-2.png) - -3. Click on the "API Keys" menu button: ![Screenshot with the mouse - pointer over the "API Keys" menu button](./apikey-3.png) - -4. Click on the "generate new key" button in the upper-right: - ![Screenshot with the mouse pointer over the "generate new key" - button](./apikey-4.png) - -5. Enter a description for the key (perhaps the name of your laptop, - or perhaps the "CI"), and click "generate api key" to create it. - -You may now pass the API key as `KEY` to `telepresence login --apikey=KEY`. - -Telepresence will use that "master" API key to create narrower keys -for different components of Telepresence. You will see these appear -in the Ambassador Cloud web interface. 
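Putting the pieces together, a fully non-interactive login (for example on a headless remote machine or in CI) might look like the sketch below; the environment variable name is only an illustration of where you might store the key:

```console
$ telepresence login --apikey="$TELEPRESENCE_API_KEY"
Login successful.
```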
diff --git a/docs/v2.5/reference/client/login/apikey-2.png b/docs/v2.5/reference/client/login/apikey-2.png deleted file mode 100644 index 1379502a..00000000 Binary files a/docs/v2.5/reference/client/login/apikey-2.png and /dev/null differ diff --git a/docs/v2.5/reference/client/login/apikey-3.png b/docs/v2.5/reference/client/login/apikey-3.png deleted file mode 100644 index 4559b784..00000000 Binary files a/docs/v2.5/reference/client/login/apikey-3.png and /dev/null differ diff --git a/docs/v2.5/reference/client/login/apikey-4.png b/docs/v2.5/reference/client/login/apikey-4.png deleted file mode 100644 index 25c6581a..00000000 Binary files a/docs/v2.5/reference/client/login/apikey-4.png and /dev/null differ diff --git a/docs/v2.5/reference/cluster-config.md b/docs/v2.5/reference/cluster-config.md deleted file mode 100644 index 1db27ef7..00000000 --- a/docs/v2.5/reference/cluster-config.md +++ /dev/null @@ -1,312 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; -import { ClusterConfig } from '@src/components/Docs/Telepresence'; - -# Cluster-side configuration - -For the most part, Telepresence doesn't require any special -configuration in the cluster and can be used right away in any -cluster (as long as the user has adequate [RBAC permissions](../rbac) -and the cluster's server version is `1.17.0` or higher). - -However, some advanced features do require some configuration in the -cluster. - -## TLS - -In this example, other applications in the cluster expect to speak TLS to your -intercepted application (perhaps you're using a service-mesh that does -mTLS). - -In order to use `--mechanism=http` (or any features that imply -`--mechanism=http`) you need to tell Telepresence about the TLS -certificates in use. - -Tell Telepresence about the certificates in use by adjusting your -[workload's](../intercepts/#supported-workloads) Pod template to set a couple of -annotations on the intercepted Pods: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ "getambassador.io/inject-terminating-tls-secret": "your-terminating-secret" # optional -+ "getambassador.io/inject-originating-tls-secret": "your-originating-secret" # optional - spec: -+ serviceAccountName: "your-account-that-has-rbac-to-read-those-secrets" - containers: -``` - -- The `getambassador.io/inject-terminating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS server - certificate to use for decrypting and responding to incoming - requests. - - When Telepresence modifies the Service and workload port - definitions to point at the Telepresence Agent sidecar's port - instead of your application's actual port, the sidecar will use this - certificate to terminate TLS. - -- The `getambassador.io/inject-originating-tls-secret` annotation - (optional) names the Kubernetes Secret that contains the TLS - client certificate to use for communicating with your application. - - You will need to set this if your application expects incoming - requests to speak TLS (for example, your - code expects to handle mTLS itself instead of letting a service-mesh - sidecar handle mTLS for it, or the port definition that Telepresence - modified pointed at the service-mesh sidecar instead of at your - application). - - If you do set this, you should to set it to the - same client certificate Secret that you configure the Ambassador - Edge Stack to use for mTLS. - -It is only possible to refer to a Secret that is in the same Namespace -as the Pod. 
- -The Pod will need to have permission to `get` and `watch` each of -those Secrets. - -Telepresence understands `type: kubernetes.io/tls` Secrets and -`type: istio.io/key-and-cert` Secrets; as well as `type: Opaque` -Secrets that it detects to be formatted as one of those types. - -## Air gapped cluster - -If your cluster is on an isolated network such that it cannot -communicate with Ambassador Cloud, then some additional configuration -is required to acquire a license key in order to use personal -intercepts. - -### Create a license - -1. - -2. Generate a new license (if one doesn't already exist) by clicking *Generate New License*. - -3. You will be prompted for your Cluster ID. Ensure your -kubeconfig context is using the cluster you want to create a license for then -run this command to generate the Cluster ID: - - ``` - $ telepresence current-cluster-id - - Cluster ID: - ``` - -4. Click *Generate API Key* to finish generating the license. - -5. On the licenses page, download the license file associated with your cluster. - -### Add license to cluster -There are two separate ways you can add the license to your cluster: manually creating and deploying -the license secret or having the helm chart manage the secret - -You only need to do one of the two options. - -#### Manual deploy of license secret - -1. Use this command to generate a Kubernetes Secret config using the license file: - - ``` - $ telepresence license -f - - apiVersion: v1 - data: - hostDomain: - license: - kind: Secret - metadata: - creationTimestamp: null - name: systema-license - namespace: ambassador - ``` - -2. Save the output as a YAML file and apply it to your -cluster with `kubectl`. - -3. When deploying the `traffic-manager` chart, you must add the additional values when running `helm install` by putting -the following into a file (for the example we'll assume it's called license-values.yaml) - - ``` - licenseKey: - # This mounts the secret into the traffic-manager - create: true - secret: - # This tells the helm chart not to create the secret since you've created it yourself - create: false - ``` - -4. Install the helm chart into the cluster - - ``` - helm install traffic-manager -n ambassador datawire/telepresence --create-namespace -f license-values.yaml - ``` - -5. Ensure that you have the docker image for the Smart Agent (datawire/ambassador-telepresence-agent:1.11.0) -pulled and in a registry your cluster can pull from. - -6. Have users use the `images` [config key](../config/#images) keys so telepresence uses the aforementioned image for their agent. - -#### Helm chart manages the secret - -1. Get the jwt token from the downloaded license file - - ``` - $ cat ~/Downloads/ambassador.License_for_yourcluster - eyJhbGnotarealtoken.butanexample - ``` - -2. Create the following values file, substituting your real jwt token in for the one used in the example below. -(for this example we'll assume the following is placed in a file called license-values.yaml) - - ``` - licenseKey: - # This mounts the secret into the traffic-manager - create: true - # This is the value from the license file you download. this value is an example and will not work - value: eyJhbGnotarealtoken.butanexample - secret: - # This tells the helm chart to create the secret - create: true - ``` - -3. 
Install the helm chart into the cluster - - ``` - helm install traffic-manager charts/telepresence -n ambassador --create-namespace -f license-values.yaml - ``` - -Users will now be able to use preview intercepts with the -`--preview-url=false` flag. Even with the license key, preview URLs -cannot be used without enabling direct communication with Ambassador -Cloud, as Ambassador Cloud is essential to their operation. - -If using Helm to install the server-side components, see the chart's [README](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence) to learn how to configure the image registry and license secret. - -Have clients use the [skipLogin](../config/#cloud) key to ensure the cli knows it is operating in an -air-gapped environment. - -## Mutating Webhook - -By default, Telepresence updates the intercepted workload (Deployment, StatefulSet, ReplicaSet) -template to add the [Traffic Agent](../architecture/#traffic-agent) sidecar container and update the -port definitions. If you use GitOps workflows (with tools like ArgoCD) to automatically update your -cluster so that it reflects the desired state from an external Git repository, this behavior can make -your workload out of sync with that external desired state. - -To solve this issue, you can use Telepresence's Mutating Webhook alternative mechanism. Intercepted -workloads will then stay untouched and only the underlying pods will be modified to inject the Traffic -Agent sidecar container and update the port definitions. - -Simply add the `telepresence.getambassador.io/inject-traffic-agent: enabled` annotation to your -workload template's annotations: - -```diff - spec: - template: - metadata: - labels: - service: your-service -+ annotations: -+ telepresence.getambassador.io/inject-traffic-agent: enabled - spec: - containers: -``` - -### Service Port Annotation - -A service port annotation can be added to the workload to make the Mutating Webhook select a specific port -in the service. This is necessary when the service has multiple ports. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-port: https - spec: - containers: -``` - -### Service Name Annotation - -A service name annotation can be added to the workload to make the Mutating Webhook select a specific Kubernetes service. -This is necessary when the workload is exposed by multiple services. - -```diff - spec: - template: - metadata: - labels: - service: your-service - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled -+ telepresence.getambassador.io/inject-service-name: my-service - spec: - containers: -``` - -### Note on Numeric Ports - -If the targetPort of your intercepted service is pointing at a port number, in addition to -injecting the Traffic Agent sidecar, Telepresence will also inject an initContainer that will -reconfigure the pod's firewall rules to redirect traffic to the Traffic Agent. - - -Note that this initContainer requires `NET_ADMIN` capabilities. -If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. - - - -This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. 
-To enable running as GID 7777 on a specific openshift namespace, run: -oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE - - -If you need to use numeric ports without the aforementioned capabilities, you can [manually install the agent](../intercepts/manual-agent) - -For example, the following service is using a numeric port, so Telepresence would inject an initContainer into it: -```yaml -apiVersion: v1 -kind: Service -metadata: - name: your-service -spec: - type: ClusterIP - selector: - service: your-service - ports: - - port: 80 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: your-service - labels: - service: your-service -spec: - replicas: 1 - selector: - matchLabels: - service: your-service - template: - metadata: - annotations: - telepresence.getambassador.io/inject-traffic-agent: enabled - labels: - service: your-service - spec: - containers: - - name: your-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 -``` diff --git a/docs/v2.5/reference/config.md b/docs/v2.5/reference/config.md deleted file mode 100644 index 6722bc93..00000000 --- a/docs/v2.5/reference/config.md +++ /dev/null @@ -1,285 +0,0 @@ -# Laptop-side configuration - -## Global Configuration -Telepresence uses a `config.yml` file to store and change certain global configuration values that will be used for all clusters you use Telepresence with. The location of this file varies based on your OS: - -* macOS: `$HOME/Library/Application Support/telepresence/config.yml` -* Linux: `$XDG_CONFIG_HOME/telepresence/config.yml` or, if that variable is not set, `$HOME/.config/telepresence/config.yml` -* Windows: `%APPDATA%\telepresence\config.yml` - -For Linux, the above paths are for a user-level configuration. For system-level configuration, use the file at `$XDG_CONFIG_DIRS/telepresence/config.yml` or, if that variable is empty, `/etc/xdg/telepresence/config.yml`. If a file exists at both the user-level and system-level paths, the user-level path file will take precedence. - -### Values - -The config file currently supports values for the `timeouts`, `logLevels`, `images`, `cloud`, and `grpc` keys. - -Here is an example configuration to show you the conventions of how Telepresence is configured: -**note: This config shouldn't be used verbatim, since the registry `privateRepo` used doesn't exist** - -```yaml -timeouts: - agentInstall: 1m - intercept: 10s -logLevels: - userDaemon: debug -images: - registry: privateRepo # This overrides the default docker.io/datawire repo - agentImage: ambassador-telepresence-agent:1.8.0 # This overrides the agent image to inject when intercepting -cloud: - refreshMessages: 24h # Refresh messages from cloud every 24 hours instead of the default, which is 1 week. -grpc: - maxReceiveSize: 10Mi -telepresenceAPI: - port: 9980 -``` - -#### Timeouts - -Values for `timeouts` are all durations either as a number of seconds -or as a string with a unit suffix of `ms`, `s`, `m`, or `h`. Strings -can be fractional (`1.5h`) or combined (`2h45m`). 
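For example, a purely illustrative Linux `config.yml` could mix those forms like this (the field names come from the table below, but the values are made up):

```console
$ cat "${XDG_CONFIG_HOME:-$HOME/.config}/telepresence/config.yml"
timeouts:
  agentInstall: 120   # plain number of seconds
  intercept: 7.5      # fractional number of seconds
  helm: 2m30s         # combined duration string
```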
- -These are the valid fields for the `timeouts` key: - -| Field | Description | Type | Default | -|-------------------------|------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|------------| -| `agentInstall` | Waiting for Traffic Agent to be installed | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | -| `apply` | Waiting for a Kubernetes manifest to be applied | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 1 minute | -| `clusterConnect` | Waiting for cluster to be connected | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | -| `intercept` | Waiting for an intercept to become active | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | -| `proxyDial` | Waiting for an outbound connection to be established | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 5 seconds | -| `trafficManagerConnect` | Waiting for the Traffic Manager API to connect for port fowards | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 20 seconds | -| `trafficManagerAPI` | Waiting for connection to the gPRC API after `trafficManagerConnect` is successful | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 15 seconds | -| `helm` | Waiting for Helm operations (e.g. `install`) on the Traffic Manager | [int][yaml-int] or [float][yaml-float] number of seconds, or [duration][go-duration] [string][yaml-str] | 2 minutes | - -#### Log Levels - -Values for the `logLevels` fields are one of the following strings, -case insensitive: - - - `trace` - - `debug` - - `info` - - `warning` or `warn` - - `error` - - `fatal` - - `panic` - -For whichever log-level you select, you will get logs labeled with that level and of higher severity. -(e.g. if you use `info`, you will also get logs labeled `error`. You will NOT get logs labeled `debug`. - -These are the valid fields for the `logLevels` key: - -| Field | Description | Type | Default | -|--------------|---------------------------------------------------------------------|---------------------------------------------|---------| -| `userDaemon` | Logging level to be used by the User Daemon (logs to connector.log) | [loglevel][logrus-level] [string][yaml-str] | debug | -| `rootDaemon` | Logging level to be used for the Root Daemon (logs to daemon.log) | [loglevel][logrus-level] [string][yaml-str] | info | - -#### Images -Values for `images` are strings. These values affect the objects that are deployed in the cluster, -so it's important to ensure users have the same configuration. - -Additionally, you can deploy the server-side components with [Helm](../../install/helm), to prevent them -from being overridden by a client's config and use the [mutating-webhook](../cluster-config/#mutating-webhook) -to handle installation of the `traffic-agents`. 
- -These are the valid fields for the `images` key: - -| Field | Description | Type | Default | -|---------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------|----------------------| -| `registry` | Docker registry to be used for installing the Traffic Manager and default Traffic Agent. If not using a helm chart to deploy server-side objects, changing this value will create a new traffic-manager deployment when using Telepresence commands. Additionally, changing this value will update installed default `traffic-agents` to use the new registry when creating a new intercept. | Docker registry name [string][yaml-str] | `docker.io/datawire` | -| `agentImage` | `$registry/$imageName:$imageTag` to use when installing the Traffic Agent. Changing this value will update pre-existing `traffic-agents` to use this new image. *The `registry` value is not used for the `traffic-agent` if you have this value set.* | qualified Docker image name [string][yaml-str] | (unset) | -| `webhookRegistry` | The container `$registry` that the [Traffic Manager](../cluster-config/#mutating-webhook) will use with the `webhookAgentImage` *This value is only used if a new `traffic-manager` is deployed* | Docker registry name [string][yaml-str] | `docker.io/datawire` | -| `webhookAgentImage` | The container image that the [Traffic Manager](../cluster-config/#mutating-webhook) will pull from the `webhookRegistry` when installing the Traffic Agent in annotated pods *This value is only used if a new `traffic-manager` is deployed* | non-qualified Docker image name [string][yaml-str] | (unset) | - -#### Cloud -Values for `cloud` are listed below and their type varies, so please see the chart for the expected type for each config value. -These fields control how the client interacts with the Cloud service. - -| Field | Description | Type | Default | -|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------|---------| -| `skipLogin` | Whether the CLI should skip automatic login to Ambassador Cloud. If set to true, in order to perform personal intercepts you must have a [license key](../cluster-config/#air-gapped-cluster) installed in the cluster. | [bool][yaml-bool] | false | -| `refreshMessages` | How frequently the CLI should communicate with Ambassador Cloud to get new command messages, which also resets whether the message has been raised or not. 
You will see each message at most once within the duration given by this config | [duration][go-duration] [string][yaml-str] | 168h | -| `systemaHost` | The host used to communicate with Ambassador Cloud | [string][yaml-str] | app.getambassador.io | -| `systemaPort` | The port used with `systemaHost` to communicate with Ambassador Cloud | [string][yaml-str] | 443 | - -Telepresence attempts to auto-detect if the cluster is capable of -communication with Ambassador Cloud, but may still prompt you to log -in in cases where only the on-laptop client wishes to communicate with -Ambassador Cloud. If you want those auto-login points to be disabled -as well, or would like it to not attempt to communicate with -Ambassador Cloud at all (even for the auto-detection), then be sure to -set the `skipLogin` value to `true`. - -Reminder: To use personal intercepts, which normally require a login, -you must have a license key in your cluster and specify which -`agentImage` should be installed by also adding the following to your -`config.yml`: - -```yaml -images: - agentImage: / -``` - -#### Grpc -The `maxReceiveSize` determines how large a message that the workstation receives via gRPC can be. The default is 4Mi (determined by gRPC). All traffic to and from the cluster is tunneled via gRPC. - -The size is measured in bytes. You can express it as a plain integer or as a fixed-point number using E, G, M, or K. You can also use the power-of-two equivalents: Gi, Mi, Ki. For example, the following represent roughly the same value: -``` -128974848, 129e6, 129M, 123Mi -``` - -#### RESTful API server -The `telepresenceAPI` controls the behavior of Telepresence's RESTful API server that can be queried for additional information about ongoing intercepts. When present, and the `port` is set to a valid port number, it's propagated to the auto-installer so that application containers that can be intercepted gets the `TELEPRESENCE_API_PORT` environment set. The server can then be queried at `localhost:`. In addition, the `traffic-agent` and the `user-daemon` on the workstation that performs an intercept will start the server on that port. -If the `traffic-manager` is auto-installed, its webhook agent injector will be configured to add the `TELEPRESENCE_API_PORT` environment to the app container when the `traffic-agent` is injected. -See [RESTful API server](../restapi) for more info. - -#### Daemons - -`daemons` controls which binary to use for the user daemon. By default it will -use the Telepresence binary. For example, this can be used to tell Telepresence to -use the Telepresence Pro binary. - -| Field | Description | Type | Default | -|--------------------|-------------------------------------------------------------|--------------------|--------------------------------------| -| `userDaemonBinary` | The path to the binary you want to use for the User Daemon. | [string][yaml-str] | The path to Telepresence executable | - - -## Per-Cluster Configuration -Some configuration is not global to Telepresence and is actually specific to a cluster. Thus, we store that config information in your kubeconfig file, so that it is easier to maintain per-cluster configuration. - -### Values -The current per-cluster configuration supports `dns`, `alsoProxy`, and `manager` keys. 
-To add configuration, simply add a `telepresence.io` entry to the cluster in your kubeconfig like so: - -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - also-proxy: - manager: - name: example-cluster -``` -#### DNS -The fields for `dns` are: local-ip, remote-ip, exclude-suffixes, include-suffixes, and lookup-timeout. - -| Field | Description | Type | Default | -|--------------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------|-----------------------------------------------------------------------------| -| `local-ip` | The address of the local DNS server. This entry is only used on Linux systems that are not configured to use systemd-resolved. | IP address [string][yaml-str] | first `nameserver` mentioned in `/etc/resolv.conf` | -| `remote-ip` | The address of the cluster's DNS service. | IP address [string][yaml-str] | IP of the `kube-dns.kube-system` or the `dns-default.openshift-dns` service | -| `exclude-suffixes` | Suffixes for which the DNS resolver will always fail (or fallback in case of the overriding resolver) | [sequence][yaml-seq] of [strings][yaml-str] | `[".arpa", ".com", ".io", ".net", ".org", ".ru"]` | -| `include-suffixes` | Suffixes for which the DNS resolver will always attempt to do a lookup. Includes have higher priority than excludes. | [sequence][yaml-seq] of [strings][yaml-str] | `[]` | -| `lookup-timeout` | Maximum time to wait for a cluster side host lookup. | [duration][go-duration] [string][yaml-str] | 4 seconds | - -Here is an example kubeconfig: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - dns: - include-suffixes: - - .se - exclude-suffixes: - - .com - name: example-cluster -``` - - -#### AlsoProxy - -When using `also-proxy`, you provide a list of subnets after the key in your kubeconfig file to be added to the TUN device. -All connections to addresses that the subnet spans will be dispatched to the cluster - -Here is an example kubeconfig for the subnet `1.2.3.4/32`: -``` -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - also-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -#### NeverProxy - -When using `never-proxy` you provide a list of subnets after the key in your kubeconfig file. These will never be routed via the -TUN device, even if they fall within the subnets (pod or service) for the cluster. Instead, whatever route they have before -telepresence connects is the route they will keep. - -Here is an example kubeconfig for the subnet `1.2.3.4/32`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - never-proxy: - - 1.2.3.4/32 - name: example-cluster -``` - -##### Using AlsoProxy together with NeverProxy - -Never proxy and also proxy are implemented as routing rules, meaning that when the two conflict, regular routing routes apply. -Usually this means that the most specific route will win. - -So, for example, if an `also-proxy` subnet falls within a broader `never-proxy` subnet: - -```yaml -never-proxy: [10.0.0.0/16] -also-proxy: [10.0.5.0/24] -``` - -Then the specific `also-proxy` of `10.0.5.0/24` will be proxied by the TUN device, whereas the rest of `10.0.0.0/16` will not. 
- -Conversely if a `never-proxy` subnet is inside a larger `also-proxy` subnet: - -```yaml -also-proxy: [10.0.0.0/16] -never-proxy: [10.0.5.0/24] -``` - -Then all of the also-proxy of `10.0.0.0/16` will be proxied, with the exception of the specific `never-proxy` of `10.0.5.0/24` - -#### Manager - -The `manager` key contains configuration for finding the `traffic-manager` that telepresence will connect to. It supports one key, `namespace`, indicating the namespace where the traffic manager is to be found - -Here is an example kubeconfig that will instruct telepresence to connect to a manager in namespace `staging`: - -```yaml -apiVersion: v1 -clusters: -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - manager: - namespace: staging - name: example-cluster -``` - -[yaml-bool]: https://yaml.org/type/bool.html -[yaml-float]: https://yaml.org/type/float.html -[yaml-int]: https://yaml.org/type/int.html -[yaml-seq]: https://yaml.org/type/seq.html -[yaml-str]: https://yaml.org/type/str.html -[go-duration]: https://pkg.go.dev/time#ParseDuration -[logrus-level]: https://github.com/sirupsen/logrus/blob/v1.8.1/logrus.go#L25-L45 diff --git a/docs/v2.5/reference/dns.md b/docs/v2.5/reference/dns.md deleted file mode 100644 index e38fbc61..00000000 --- a/docs/v2.5/reference/dns.md +++ /dev/null @@ -1,75 +0,0 @@ -# DNS resolution - -The Telepresence DNS resolver is dynamically configured to resolve names using the namespaces of currently active intercepts. Processes running locally on the desktop will have network access to all services in the such namespaces by service-name only. - -All intercepts contribute to the DNS resolver, even those that do not use the `--namespace=` option. This is because `--namespace default` is implied, and in this context, `default` is treated just like any other namespace. - -No namespaces are used by the DNS resolver (not even `default`) when no intercepts are active, which means that no service is available by `` only. Without an active intercept, the namespace qualified DNS name must be used (in the form `.`). - -See this demonstrated below, using the [quick start's](../../quick-start/) sample app services. - -No intercepts are currently running, we'll connect to the cluster and list the services that can be intercepted. - -``` -$ telepresence connect - - Connecting to traffic manager... - Connected to context default (https://) - -$ telepresence list - - web-app-5d568ccc6b : ready to intercept (traffic-agent not yet installed) - emoji : ready to intercept (traffic-agent not yet installed) - web : ready to intercept (traffic-agent not yet installed) - web-app-5d568ccc6b : ready to intercept (traffic-agent not yet installed) - -$ curl web-app:80 - - curl: (6) Could not resolve host: web-app - -``` - -This is expected as Telepresence cannot reach the service yet by short name without an active intercept in that namespace. - -``` -$ curl web-app.emojivoto:80 - - - - - - Emoji Vote - ... -``` - -Using the namespaced qualified DNS name though does work. -Now we'll start an intercept against another service in the same namespace. Remember, `--namespace default` is implied since it is not specified. 
- -``` -$ telepresence intercept web --port 8080 - - Using Deployment web - intercepted - Intercept name : web - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:8080 - Volume Mount Point: /tmp/telfs-166119801 - Intercepting : HTTP requests that match all headers: - 'x-telepresence-intercept-id: 8eac04e3-bf24-4d62-b3ba-35297c16f5cd:web' - -$ curl webapp:80 - - - - - - Emoji Vote - ... -``` - -Now curling that service by its short name works and will as long as the intercept is active. - -The DNS resolver will always be able to resolve services using `.` regardless of intercepts. - -See [Outbound connectivity](../routing/#dns-resolution) for details on DNS lookups. diff --git a/docs/v2.5/reference/docker-run.md b/docs/v2.5/reference/docker-run.md deleted file mode 100644 index 2262f0a5..00000000 --- a/docs/v2.5/reference/docker-run.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -Description: "How a Telepresence intercept can run a Docker container with configured environment and volume mounts." ---- - -# Using Docker for intercepts - -If you want your intercept to go to a Docker container on your laptop, use the `--docker-run` option. It creates the intercept, runs your container in the foreground, then automatically ends the intercept when the container exits. - -`telepresence intercept --port --docker-run -- ` - -The `--` separates flags intended for `telepresence intercept` from flags intended for `docker run`. - -## Example - -Imagine you are working on a new version of a your frontend service. It is running in your cluster as a Deployment called `frontend-v1`. You use Docker on your laptop to build an improved version of the container called `frontend-v2`. To test it out, use this command to run the new container on your laptop and start an intercept of the cluster service to your local container. - -`telepresence intercept frontend-v1 --port 8000 --docker-run -- frontend-v2` - -## Ports - -The `--port` flag can specify an additional port when `--docker-run` is used so that the local and container port can be different. This is done using `--port :`. The container port will default to the local port when using the `--port ` syntax. - -## Flags - -Telepresence will automatically pass some relevant flags to Docker in order to connect the container with the intercept. Those flags are combined with the arguments given after `--` on the command line. - -- `--dns-search tel2-search` Enables single label name lookups in intercepted namespaces -- `--env-file ` Loads the intercepted environment -- `--name intercept--` Names the Docker container, this flag is omitted if explicitly given on the command line -- `-p ` The local port for the intercept and the container port -- `-v ` Volume mount specification, see CLI help for `--mount` and `--docker-mount` flags for more info diff --git a/docs/v2.5/reference/inside-container.md b/docs/v2.5/reference/inside-container.md deleted file mode 100644 index f83ef357..00000000 --- a/docs/v2.5/reference/inside-container.md +++ /dev/null @@ -1,37 +0,0 @@ -# Running Telepresence inside a container - -It is sometimes desirable to run Telepresence inside a container. One reason can be to avoid any side effects on the workstation's network, another can be to establish multiple sessions with the traffic manager, or even work with different clusters simultaneously. - -## Building the container - -Building a container with a ready-to-run Telepresence is easy because there are relatively few external dependencies. 
Add the following to a `Dockerfile`: - -```Dockerfile -# Dockerfile with telepresence and its prerequisites -FROM alpine:3.13 - -# Install Telepresence prerequisites -RUN apk add --no-cache curl iproute2 sshfs - -# Download and install the telepresence binary -RUN curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence -o telepresence && \ - install -o root -g root -m 0755 telepresence /usr/local/bin/telepresence -``` -In order to build the container, do this in the same directory as the `Dockerfile`: -``` -$ docker build -t tp-in-docker . -``` - -## Running the container - -Telepresence will need access to the `/dev/net/tun` device on your Linux host (or, in case the host isn't Linux, the Linux VM that Docker starts automatically), and a Kubernetes config that identifies the cluster. It will also need `--cap-add=NET_ADMIN` to create its Virtual Network Interface. - -The command to run the container can look like this: -```bash -$ docker run \ - --cap-add=NET_ADMIN \ - --device /dev/net/tun:/dev/net/tun \ - --network=host \ - -v ~/.kube/config:/root/.kube/config \ - -it --rm tp-in-docker -``` diff --git a/docs/v2.5/reference/intercepts/index.md b/docs/v2.5/reference/intercepts/index.md deleted file mode 100644 index 20b0094d..00000000 --- a/docs/v2.5/reference/intercepts/index.md +++ /dev/null @@ -1,354 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Intercepts - -When intercepting a service, Telepresence installs a *traffic-agent* -sidecar in to the workload. That traffic-agent supports one or more -intercept *mechanisms* that it uses to decide which traffic to -intercept. Telepresence has a simple default traffic-agent, however -you can configure a different traffic-agent with more sophisticated -mechanisms either by setting the [`images.agentImage` field in -`config.yml`](../config/#images) or by writing an -[`extensions/${extension}.yml`][extensions] file that tells -Telepresence about a traffic-agent that it can use, what mechanisms -that traffic-agent supports, and command-line flags to expose to the -user to configure that mechanism. You may tell Telepresence which -known mechanism to use with the `--mechanism=${mechanism}` flag or by -setting one of the `--${mechansim}-XXX` flags, which implicitly set -the mechanism; for example, setting `--http-match=auto` implicitly -sets `--mechanism=http`. - -The default open-source traffic-agent only supports the `tcp` -mechanism, which treats the raw layer 4 TCP streams as opaque and -sends all of that traffic down to the developer's workstation. This -means that it is a "global" intercept, affecting all users of the -cluster. - -In addition to the default open-source traffic-agent, Telepresence -already knows about the Ambassador Cloud -[traffic-agent][ambassador-agent], which supports the `http` -mechanism. The `http` mechanism operates at higher layer, working -with layer 7 HTTP, and may intercept specific HTTP requests, allowing -other HTTP requests through to the regular service. This allows for -"personal" intercepts which only intercept traffic tagged as belonging -to a given developer. 
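To make the difference concrete, here is a rough sketch of the two modes using the flags described above, assuming a workload named `hello` that listens on port 8000:

```console
# Global intercept: the default open-source traffic-agent reroutes all TCP traffic
$ telepresence intercept hello --port 8000 --mechanism=tcp

# Personal intercept: any --http-* flag implies --mechanism=http, so only
# matching HTTP requests are rerouted to your workstation
$ telepresence intercept hello --port 8000 --http-match=auto
```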
- -[extensions]: https://pkg.go.dev/github.com/telepresenceio/telepresence/v2@v$version$/pkg/client/cli/extensions -[ambassador-agent]: https://github.com/telepresenceio/telepresence/blob/release/v2/pkg/client/cli/extensions/builtin.go#L30-L50 - -## Intercept behavior when logged in to Ambassador Cloud - -Logging in to Ambassador Cloud (with [`telepresence -login`](../client/login/)) changes the Telepresence defaults in two -ways. - -First, being logged in to Ambassador Cloud causes Telepresence to -default to `--mechanism=http --http-match=auto --http-path-prefix=/` ( -`--mechanism=http` is redundant. It is implied by other `--http-xxx` flags). -If you hadn't been logged in it would have defaulted to -`--mechanism=tcp`. This tells Telepresence to use the Ambassador -Cloud traffic-agent to do smart "personal" intercepts and only -intercept a subset of HTTP requests, rather than just intercepting the -entirety of all TCP connections. This is important for working in a -shared cluster with teammates, and is important for the preview URL -functionality below. See `telepresence intercept --help` for -information on using the `--http-match` and `--http-path-xxx` flags to -customize which requests that are intercepted. - -Secondly, being logged in causes Telepresence to default to -`--preview-url=true`. If you hadn't been logged in it would have -defaulted to `--preview-url=false`. This tells Telepresence to take -advantage of Ambassador Cloud to create a preview URL for this -intercept, creating a shareable URL that automatically sets the -appropriate headers to have requests coming from the preview URL be -intercepted. In order to create the preview URL, it will prompt you -for four settings about how your cluster's ingress is configured. For -each, Telepresence tries to intelligently detect the correct value for -your cluster; if it detects it correctly, may simply press "enter" and -accept the default, otherwise you must tell Telepresence the correct -value. - -When creating an intercept with the `http` mechanism, the -traffic-agent sends a `GET /telepresence-http2-check` request to your -service and to the process running on your local machine at the port -specified in your intercept, in order to determine if they support -HTTP/2. This is required for the intercepts to behave correctly. If -you do not have a service running locally when the intercept is -created, the traffic-agent will use the result it got from checking -the in-cluster service. - -## Supported workloads - -Kubernetes has various -[workloads](https://kubernetes.io/docs/concepts/workloads/). -Currently, Telepresence supports intercepting (installing a -traffic-agent on) `Deployments`, `ReplicaSets`, and `StatefulSets`. - - - -While many of our examples use Deployments, they would also work on -ReplicaSets and StatefulSets - - - -## Specifying a namespace for an intercept - -The namespace of the intercepted workload is specified using the -`--namespace` option. When this option is used, and `--workload` is -not used, then the given name is interpreted as the name of the -workload and the name of the intercept will be constructed from that -name and the namespace. - -```shell -telepresence intercept hello --namespace myns --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept -`hello-myns`. In order to remove the intercept, you will need to run -`telepresence leave hello-mydns` instead of just `telepresence leave -hello`. 
- -The name of the intercept will be left unchanged if the workload is specified. - -```shell -telepresence intercept myhello --namespace myns --workload hello --port 9000 -``` - -This will intercept a workload named `hello` and name the intercept `myhello`. - -## Importing environment variables - -Telepresence can import the environment variables from the pod that is -being intercepted, see [this doc](../environment/) for more details. - -## Creating an intercept without a preview URL - -If you *are not* logged in to Ambassador Cloud, the following command -will intercept all traffic bound to the service and proxy it to your -laptop. This includes traffic coming through your ingress controller, -so use this option carefully as to not disrupt production -environments. - -```shell -telepresence intercept --port= -``` - -If you *are* logged in to Ambassador Cloud, setting the -`--preview-url` flag to `false` is necessary. - -```shell -telepresence intercept --port= --preview-url=false -``` - -This will output an HTTP header that you can set on your request for -that traffic to be intercepted: - -```console -$ telepresence intercept --port= --preview-url=false -Using Deployment -intercepted - Intercept name: - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp(":") -``` - -Run `telepresence status` to see the list of active intercepts. - -```console -$ telepresence status -Root Daemon: Running - Version : v2.1.4 (api 3) - Primary DNS : "" - Fallback DNS: "" -User Daemon: Running - Version : v2.1.4 (api 3) - Ambassador Cloud : Logged out - Status : Connected - Kubernetes server : https:// - Kubernetes context: default - Telepresence proxy: ON (networking to the cluster is enabled) - Intercepts : 1 total - dataprocessingnodeservice: @ -``` - -Finally, run `telepresence leave ` to stop the intercept. - -## Skipping the ingress dialogue - -You can skip the ingress dialogue by setting the relevant parameters using flags. If any of the following flags are set, the dialogue will be skipped and the flag values will be used instead. If any of the required flags are missing, an error will be thrown. - -| Flag | Description | Required | -|------------------|------------------------------------------------------------------|------------| -| `--ingress-host` | The ip address for the ingress | yes | -| `--ingress-port` | The port for the ingress | yes | -| `--ingress-tls` | Whether tls should be used | no | -| `--ingress-l5` | Whether a different ip address should be used in request headers | no | - -## Creating an intercept when a service has multiple ports - -If you are trying to intercept a service that has multiple ports, you -need to tell Telepresence which service port you are trying to -intercept. To specify, you can either use the name of the service -port or the port number itself. To see which options might be -available to you and your service, use kubectl to describe your -service or look in the object's YAML. For more information on multiple -ports, see the [Kubernetes documentation][kube-multi-port-services]. 
- -[kube-multi-port-services]: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - -```console -$ telepresence intercept --port=: -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -When intercepting a service that has multiple ports, the name of the -service port that has been intercepted is also listed. - -If you want to change which port has been intercepted, you can create -a new intercept the same way you did above and it will change which -service port is being intercepted. - -## Creating an intercept When multiple services match your workload - -Oftentimes, there's a 1-to-1 relationship between a service and a -workload, so telepresence is able to auto-detect which service it -should intercept based on the workload you are trying to intercept. -But if you use something like -[Argo](https://www.getambassador.io/docs/argo/latest/), there may be -two services (that use the same labels) to manage traffic between a -canary and a stable service. - -Fortunately, if you know which service you want to use when -intercepting a workload, you can use the `--service` flag. So in the -aforementioned example, if you wanted to use the `echo-stable` service -when intercepting your workload, your command would look like this: - -```console -$ telepresence intercept echo-rollout- --port --service echo-stable -Using ReplicaSet echo-rollout- -intercepted - Intercept name : echo-rollout- - State : ACTIVE - Workload kind : ReplicaSet - Destination : 127.0.0.1:3000 - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-921196036 - Intercepting : all TCP connections -``` - -## Port-forwarding an intercepted container's sidecars - -Sidecars are containers that sit in the same pod as an application -container; they usually provide auxiliary functionality to an -application, and can usually be reached at -`localhost:${SIDECAR_PORT}`. For example, a common use case for a -sidecar is to proxy requests to a database, your application would -connect to `localhost:${SIDECAR_PORT}`, and the sidecar would then -connect to the database, perhaps augmenting the connection with TLS or -authentication. - -When intercepting a container that uses sidecars, you might want those -sidecars' ports to be available to your local application at -`localhost:${SIDECAR_PORT}`, exactly as they would be if running -in-cluster. Telepresence's `--to-pod ${PORT}` flag implements this -behavior, adding port-forwards for the port given. - -```console -$ telepresence intercept --port=: --to-pod= -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Service Port Identifier: - Intercepting : all TCP connections -``` - -If there are multiple ports that you need forwarded, simply repeat the -flag (`--to-pod= --to-pod=`). - -## Intercepting headless services - -Kubernetes supports creating [services without a ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services), -which, when they have a pod selector, serve to provide a DNS record that will directly point to the service's backing pods. -Telepresence supports intercepting these `headless` services as it would a regular service with a ClusterIP. 
-So, for example, if you have the following service: - -```yaml ---- -apiVersion: v1 -kind: Service -metadata: - name: my-headless -spec: - type: ClusterIP - clusterIP: None - selector: - service: my-headless - ports: - - port: 8080 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: my-headless - labels: - service: my-headless -spec: - replicas: 1 - serviceName: my-headless - selector: - matchLabels: - service: my-headless - template: - metadata: - labels: - service: my-headless - spec: - containers: - - name: my-headless - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -``` - -You can intercept it like any other: - -```console -$ telepresence intercept my-headless --port 8080 -Using StatefulSet my-headless -intercepted - Intercept name : my-headless - State : ACTIVE - Workload kind : StatefulSet - Destination : 127.0.0.1:8080 - Volume Mount Point: /var/folders/j8/kzkn41mx2wsd_ny9hrgd66fc0000gp/T/telfs-524189712 - Intercepting : all TCP connections -``` - - -This utilizes an initContainer that requires `NET_ADMIN` capabilities. -If your cluster administrator has disabled them, you will be unable to use numeric ports with the agent injector. - - - -This requires the Traffic Agent to run as GID 7777. By default, this is disabled on openshift clusters. -To enable running as GID 7777 on a specific openshift namespace, run: -oc adm policy add-scc-to-group anyuid system:serviceaccounts:$NAMESPACE - - - -Intercepting headless services without a selector is not supported. - diff --git a/docs/v2.5/reference/intercepts/manual-agent.md b/docs/v2.5/reference/intercepts/manual-agent.md deleted file mode 100644 index e818171c..00000000 --- a/docs/v2.5/reference/intercepts/manual-agent.md +++ /dev/null @@ -1,221 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Manually injecting the Traffic Agent - -You can directly modify your workload's YAML configuration to add the Telepresence Traffic Agent and enable it to be intercepted. - -When you use a Telepresence intercept, Telepresence automatically edits the workload and services when you use -`telepresence uninstall --agent `. In some GitOps workflows, you may need to use the -[Telepresence Mutating Webhook](../../cluster-config/#mutating-webhook) to keep intercepted workloads unmodified -while you target changes on specific pods. - - -In situations where you don't have access to the proper permissions for numeric ports, as noted in the Note on numeric ports -section of the documentation, it is possible to manually inject the Traffic Agent. Because this is not the recommended approach -to making a workload interceptable, try the Mutating Webhook before proceeding." - - -## Procedure - -You can manually inject the agent into Deployments, StatefulSets, or ReplicaSets. The example on this page -uses the following Deployment: - - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "my-service" - labels: - service: my-service -spec: - replicas: 1 - selector: - matchLabels: - service: my-service - template: - metadata: - labels: - service: my-service - spec: - containers: - - name: echo-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -``` - -The deployment is being exposed by the following service: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: "my-service" -spec: - type: ClusterIP - selector: - service: my-service - ports: - - port: 80 - targetPort: 8080 -``` - -### 1. 
Generating the YAML - -First, generate the YAML for the traffic-agent container: - -```console -$ telepresence genyaml container --container-name echo-container --port 8080 --output - --input deployment.yaml -args: -- agent -env: -- name: TELEPRESENCE_CONTAINER - value: echo-container -- name: _TEL_AGENT_LOG_LEVEL - value: info -- name: _TEL_AGENT_NAME - value: my-service -- name: _TEL_AGENT_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -- name: _TEL_AGENT_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP -- name: _TEL_AGENT_APP_PORT - value: "8080" -- name: _TEL_AGENT_AGENT_PORT - value: "9900" -- name: _TEL_AGENT_MANAGER_HOST - value: traffic-manager.ambassador -image: docker.io/datawire/tel2:2.4.6 -name: traffic-agent -ports: -- containerPort: 9900 - protocol: TCP -readinessProbe: - exec: - command: - - /bin/stat - - /tmp/agent/ready -resources: {} -volumeMounts: -- mountPath: /tel_pod_info - name: traffic-annotations -``` - -Next, generate the YAML for the volume: - -```console -$ telepresence genyaml volume --output - --input deployment.yaml -downwardAPI: - items: - - fieldRef: - fieldPath: metadata.annotations - path: annotations -name: traffic-annotations -``` - - -Enter `telepresence genyaml container --help` or `telepresence genyaml volume --help` for more information about these flags. - - -### 2. Injecting the YAML into the Deployment - -You need to add the `Deployment` YAML you genereated to include the container and the volume. These are placed as elements of `spec.template.spec.containers` and `spec.template.spec.volumes` respectively. -You also need to modify `spec.template.metadata.annotations` and add the annotation `telepresence.getambassador.io/manually-injected: "true"`. -These changes should look like the following: - -```diff -apiVersion: apps/v1 -kind: Deployment -metadata: - name: "my-service" - labels: - service: my-service -spec: - replicas: 1 - selector: - matchLabels: - service: my-service - template: - metadata: - labels: - service: my-service -+ annotations: -+ telepresence.getambassador.io/manually-injected: "true" - spec: - containers: - - name: echo-container - image: jmalloc/echo-server - ports: - - containerPort: 8080 - resources: {} -+ - args: -+ - agent -+ env: -+ - name: TELEPRESENCE_CONTAINER -+ value: echo-container -+ - name: _TEL_AGENT_LOG_LEVEL -+ value: info -+ - name: _TEL_AGENT_NAME -+ value: my-service -+ - name: _TEL_AGENT_NAMESPACE -+ valueFrom: -+ fieldRef: -+ fieldPath: metadata.namespace -+ - name: _TEL_AGENT_POD_IP -+ valueFrom: -+ fieldRef: -+ fieldPath: status.podIP -+ - name: _TEL_AGENT_APP_PORT -+ value: "8080" -+ - name: _TEL_AGENT_AGENT_PORT -+ value: "9900" -+ - name: _TEL_AGENT_MANAGER_HOST -+ value: traffic-manager.ambassador -+ image: docker.io/datawire/tel2:2.4.6 -+ name: traffic-agent -+ ports: -+ - containerPort: 9900 -+ protocol: TCP -+ readinessProbe: -+ exec: -+ command: -+ - /bin/stat -+ - /tmp/agent/ready -+ resources: {} -+ volumeMounts: -+ - mountPath: /tel_pod_info -+ name: traffic-annotations -+ volumes: -+ - downwardAPI: -+ items: -+ - fieldRef: -+ fieldPath: metadata.annotations -+ path: annotations -+ name: traffic-annotations -``` - -### 3. Modifying the service - -Once the modified deployment YAML has been applied to the cluster, you need to modify the Service to route traffic to the Traffic Agent. -You can do this by changing the exposed `targetPort` to `9900`. 
The resulting service should look like: - -```diff -apiVersion: v1 -kind: Service -metadata: - name: "my-service" -spec: - type: ClusterIP - selector: - service: my-service - ports: - - port: 80 -- targetPort: 8080 -+ targetPort: 9900 -``` diff --git a/docs/v2.5/reference/linkerd.md b/docs/v2.5/reference/linkerd.md deleted file mode 100644 index 9b903fa7..00000000 --- a/docs/v2.5/reference/linkerd.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -Description: "How to get Linkerd meshed services working with Telepresence" ---- - -# Using Telepresence with Linkerd - -## Introduction -Getting started with Telepresence on Linkerd services is as simple as adding an annotation to your Deployment: - -```yaml -spec: - template: - metadata: - annotations: - config.linkerd.io/skip-outbound-ports: "8081" -``` - -The local system and the Traffic Agent connect to the Traffic Manager using its gRPC API on port 8081. Telling Linkerd to skip that port allows the Traffic Agent sidecar to fully communicate with the Traffic Manager, and therefore the rest of the Telepresence system. - -## Prerequisites -1. [Telepresence binary](../../install) -2. Linkerd control plane [installed to cluster](https://linkerd.io/2.10/tasks/install/) -3. Kubectl -4. [Working ingress controller](https://www.getambassador.io/docs/edge-stack/latest/howtos/linkerd2) - -## Deploy -Save and deploy the following YAML. Note the `config.linkerd.io/skip-outbound-ports` annotation in the metadata of the pod template. - -```yaml ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: quote -spec: - replicas: 1 - selector: - matchLabels: - app: quote - strategy: - type: RollingUpdate - template: - metadata: - annotations: - linkerd.io/inject: "enabled" - config.linkerd.io/skip-outbound-ports: "8081,8022,6001" - labels: - app: quote - spec: - containers: - - name: backend - image: docker.io/datawire/quote:0.4.1 - ports: - - name: http - containerPort: 8000 - env: - - name: PORT - value: "8000" - resources: - limits: - cpu: "0.1" - memory: 100Mi -``` - -## Connect to Telepresence -Run `telepresence connect` to connect to the cluster. Then `telepresence list` should show the `quote` deployment as `ready to intercept`: - -``` -$ telepresence list - - quote: ready to intercept (traffic-agent not yet installed) -``` - -## Run the intercept -Run `telepresence intercept quote --port 8080:80` to direct traffic from the `quote` deployment to port 8080 on your local system. Assuming you have something listening on 8080, you should now be able to see your local service whenever attempting to access the `quote` service. diff --git a/docs/v2.5/reference/rbac.md b/docs/v2.5/reference/rbac.md deleted file mode 100644 index 6c39739e..00000000 --- a/docs/v2.5/reference/rbac.md +++ /dev/null @@ -1,291 +0,0 @@ -import Alert from '@material-ui/lab/Alert'; - -# Telepresence RBAC -The intention of this document is to provide a template for securing and limiting the permissions of Telepresence. -This documentation covers the full extent of permissions necessary to administrate Telepresence components in a cluster. - -There are two general categories for cluster permissions with respect to Telepresence. There are RBAC settings for a User and for an Administrator described above. The User is expected to only have the minimum cluster permissions necessary to create a Telepresence [intercept](../../howtos/intercepts/), and otherwise be unable to affect Kubernetes resources. 
- -In addition to the above, there is also a consideration of how to manage Users and Groups in Kubernetes which is outside of the scope of the document. This document will use Service Accounts to assign Roles and Bindings. Other methods of RBAC administration and enforcement can be found on the [Kubernetes RBAC documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) page. - -## Requirements - -- Kubernetes version 1.16+ -- Cluster admin privileges to apply RBAC - -## Editing your kubeconfig - -This guide also assumes that you are utilizing a kubeconfig file that is specified by the `KUBECONFIG` environment variable. This is a `yaml` file that contains the cluster's API endpoint information as well as the user data being supplied for authentication. The Service Account name used in the example below is called tp-user. This can be replaced by any value (i.e. John or Jane) as long as references to the Service Account are consistent throughout the `yaml`. After an administrator has applied the RBAC configuration, a user should create a `config.yaml` in your current directory that looks like the following:​ - -```yaml -apiVersion: v1 -kind: Config -clusters: -- name: my-cluster # Must match the cluster value in the contexts config - cluster: - ## The cluster field is highly cloud dependent. -contexts: -- name: my-context - context: - cluster: my-cluster # Must match the name field in the clusters config - user: tp-user -users: -- name: tp-user # Must match the name of the Service Account created by the cluster admin - user: - token: # See note below -``` - -The Service Account token will be obtained by the cluster administrator after they create the user's Service Account. Creating the Service Account will create an associated Secret in the same namespace with the format `-token-`. This token can be obtained by your cluster administrator by running `kubectl get secret -n ambassador -o jsonpath='{.data.token}' | base64 -d`. - -After creating `config.yaml` in your current directory, export the file's location to KUBECONFIG by running `export KUBECONFIG=$(pwd)/config.yaml`. You should then be able to switch to this context by running `kubectl config use-context my-context`. - -## Administrating Telepresence - -Telepresence administration requires permissions for creating `Namespaces`, `ServiceAccounts`, `ClusterRoles`, `ClusterRoleBindings`, `Secrets`, `Services`, `MutatingWebhookConfiguration`, and for creating the `traffic-manager` [deployment](../architecture/#traffic-manager) which is typically done by a full cluster administrator. 
The following permissions are needed for the installation and use of Telepresence: - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: telepresence-admin - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: telepresence-admin-role -rules: - - apiGroups: - - "" - resources: ["pods", "pods/log"] - verbs: ["get", "list", "create", "delete", "watch"] - - apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "update", "create", "delete"] - - apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] - - apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "create", "delete", "watch"] - - apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] - - apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list"] - - apiGroups: - - "rbac.authorization.k8s.io" - resources: ["clusterroles", "clusterrolebindings", "roles", "rolebindings"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch", "create"] - - apiGroups: - - "" - resources: ["secrets"] - verbs: ["get", "create", "list", "delete"] - - apiGroups: - - "" - resources: ["serviceaccounts"] - verbs: ["get", "create", "delete"] - - apiGroups: - - "admissionregistration.k8s.io" - resources: ["mutatingwebhookconfigurations"] - verbs: ["get", "create", "delete"] - - apiGroups: - - "" - resources: ["nodes"] - verbs: ["list", "get", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: telepresence-clusterrolebinding -subjects: - - name: telepresence-admin - kind: ServiceAccount - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - name: telepresence-admin-role - kind: ClusterRole -``` - -There are two ways to install the traffic-manager: Using `telepresence connect` and installing the [helm chart](../../install/helm/). - -By using `telepresence connect`, Telepresence will use your kubeconfig to create the objects mentioned above in the cluster if they don't already exist. If you want the most introspection into what is being installed, we recommend using the helm chart to install the traffic-manager. - -## Cluster-wide telepresence user access - -To allow users to make intercepts across all namespaces, but with more limited `kubectl` permissions, the following `ServiceAccount`, `ClusterRole`, and `ClusterRoleBinding` will allow full `telepresence intercept` functionality. 
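As a hypothetical spot check that is not part of the original procedure, a cluster administrator can verify the resulting permissions after applying the manifests below by impersonating the service account with `kubectl auth can-i`; the answers shown correspond to the rules in the `ClusterRole` that follows:

```console
$ kubectl auth can-i create pods --as=system:serviceaccount:ambassador:tp-user
yes
$ kubectl auth can-i delete deployments --as=system:serviceaccount:ambassador:tp-user
no
```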
- -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate value - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods", "pods/log"] - verbs: ["get", "list", "create", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "update", "watch"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "patch", "watch"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list"] -- apiGroups: - - "rbac.authorization.k8s.io" - resources: ["clusterroles", "clusterrolebindings"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: telepresence-rolebinding -subjects: -- name: tp-user - kind: ServiceAccount - namespace: ambassador -roleRef: - apiGroup: rbac.authorization.k8s.io - name: telepresence-role - kind: ClusterRole -``` - -## Namespace only telepresence user access - -RBAC for multi-tenant scenarios where multiple dev teams are sharing a single cluster where users are constrained to a specific namespace(s). - -The following RBAC configurations assume that there is already a Traffic Manager deployment set up by a Cluster Administrator - -```yaml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tp-user # Update value for appropriate user name - namespace: ambassador # Traffic-Manager is deployed to Ambassador namespace ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-role -rules: -- apiGroups: - - "" - resources: ["pods"] - verbs: ["get", "list", "create", "watch", "delete"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["update"] -- apiGroups: - - "" - resources: ["pods/portforward"] - verbs: ["create"] -- apiGroups: - - "apps" - resources: ["deployments", "replicasets", "statefulsets"] - verbs: ["get", "list", "update", "watch"] -- apiGroups: - - "getambassador.io" - resources: ["hosts", "mappings"] - verbs: ["*"] -- apiGroups: - - "" - resources: ["endpoints"] - verbs: ["get", "list", "watch"] ---- -kind: RoleBinding # RBAC to access ambassador namespace -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: t2-ambassador-binding - namespace: ambassador -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-role - apiGroup: rbac.authorization.k8s.io ---- -kind: RoleBinding # RoleBinding T2 namespace to be intecpeted -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-test-binding # Update "test" for appropriate namespace to be intercepted - namespace: test # Update "test" for appropriate namespace to be intercepted -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: 
telepresence-role - apiGroup: rbac.authorization.k8s.io -​ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-role -rules: -- apiGroups: - - "" - resources: ["namespaces"] - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: ["services"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: telepresence-namespace-binding -subjects: -- kind: ServiceAccount - name: tp-user # Should be the same as metadata.name of above ServiceAccount - namespace: ambassador -roleRef: - kind: ClusterRole - name: telepresence-namespace-role - apiGroup: rbac.authorization.k8s.io -``` diff --git a/docs/v2.5/reference/restapi.md b/docs/v2.5/reference/restapi.md deleted file mode 100644 index 462abd19..00000000 --- a/docs/v2.5/reference/restapi.md +++ /dev/null @@ -1,93 +0,0 @@ -# Telepresence RESTful API server - -Telepresence can run a RESTful API server on the local host, both on the local workstation and in a pod that contains a `traffic-agent`. The server currently has two endpoints. The standard `healthz` endpoint and the `consume-here` endpoint. - -## Enabling the server -The server is enabled by setting the `telepresenceAPI.port` to a valid port number in the [Telepresence Helm Chart](https://github.com/telepresenceio/telepresence/tree/release/v2/charts/telepresence). The values may be passed explicitly to Helm during install, or configured using the [Telepresence Config](../config#restful-api-server) to impact an auto-install. - -## Querying the server -On the cluster's side, it's the `traffic-agent` of potentially intercepted pods that runs the server. The server can be accessed using `http://localhost:/` from the application container. Telepresence ensures that the container has the `TELEPRESENCE_API_PORT` environment variable set when the `traffic-agent` is installed. On the workstation, it is the `user-daemon` that runs the server. It uses the `TELEPRESENCE_API_PORT` that is conveyed in the environment of the intercept. This means that the server can be accessed the exact same way locally, provided that the environment is propagated correctly to the interceptor process. - -## Endpoints - -The `consume-here` and `intercept-info` endpoints are both intended to be queried with an optional path query and a set of headers, typically obtained from a Kafka message or similar. Telepresence provides the ID of the intercept in the environment variable [TELEPRESENCE_INTERCEPT_ID](../environment/#telepresence_intercept_id) during an intercept. This ID must be provided in a `x-telepresence-caller-intercept-id: = ` header. Telepresence needs this to identify the caller correctly. The `` will be empty when running in the cluster, but it's harmless to provide it there too, so there's no need for conditional code. - -There are three prerequisites to fulfill before testing The `consume-here` and `intercept-info` endpoints using `curl -v` on the workstation: -1. An intercept must be active -2. The "/healthz" endpoint must respond with OK -3. The ID of the intercept must be known. It will be visible as `ID` in the output of `telepresence list --debug`. - -### healthz -The `http://localhost:/healthz` endpoint should respond with status code 200 OK. If it doesn't then something isn't configured correctly. 
Check that the `traffic-agent` container is present and that the `TELEPRESENCE_API_PORT` has been added to the environment of the application container and/or in the environment that is propagated to the interceptor that runs on the local workstation. - -#### test endpoint using curl -A `curl -v` call can be used to test the endpoint when an intercept is active. This example assumes that the API port is configured to be 9980. -```console -$ curl -v localhost:9980/healthz -* Trying ::1:9980... -* Connected to localhost (::1) port 9980 (#0) -> GET /healthz HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.76.1 -> Accept: */* -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Date: Fri, 26 Nov 2021 07:06:18 GMT -< Content-Length: 0 -< -* Connection #0 to host localhost left intact -``` - -### consume-here -`http://localhost:/consume-here` will respond with "true" (consume the message) or "false" (leave the message on the queue). When running in the cluster, this endpoint will respond with `false` if the headers match an ongoing intercept for the same workload because it's assumed that it's up to the intercept to consume the message. When running locally, the response is inverted. Matching headers means that the message should be consumed. - -#### test endpoint using curl -Assuming that the API-server runs on port 9980, that the intercept was started with `--http-match x=y --http-path-prefix=/api`, we can now check that the "/consume-here" returns "true" for the path "/api" and given headers. -```console -$ curl -v localhost:9980/consume-here?path=/api -H 'x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest' -H 'x: y' -* Trying ::1:9980... -* Connected to localhost (::1) port 9980 (#0) -> GET /consume-here?path=/api HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.76.1 -> Accept: */* -> x: y -> x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Content-Type: application/json -< Date: Fri, 26 Nov 2021 06:43:28 GMT -< Content-Length: 4 -< -* Connection #0 to host localhost left intact -true -``` - -If you can run curl from the pod, you can try the exact same URL. The result should be "false" when there's an ongoing intercept. The `x-telepresence-caller-intercept-id` is not needed when the call is made from the pod. - -### intercept-info -`http://localhost:/intercept-info` is intended to be queried with an optional path query and a set of headers, typically obtained from a Kafka message or similar, and will respond with a JSON structure containing the two booleans `clientSide` and `intercepted`, and a `metadata` map which corresponds to the `--http-meta` key pairs used when the intercept was created. This field is always omitted in case `intercepted` is `false`. - -#### test endpoint using curl -Assuming that the API-server runs on port 9980, that the intercept was started with `--http-match x=y --http-path-prefix=/api --http-meta a=b --http-meta b=c`, we can now check that the "/intercept-info" returns information for the given path and headers. 
-```console -$ curl -v localhost:9980/intercept-info?path=/api -H 'x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest' -H 'x: y' -* Trying ::1:9980...* Connected to localhost (127.0.0.1) port 9980 (#0) -> GET /intercept-info?path=/api HTTP/1.1 -> Host: localhost:9980 -> User-Agent: curl/7.79.1 -> Accept: */* -> x: y -> x-telepresence-caller-intercept-id: 4392d394-100e-4f15-a89b-426012f10e05:apitest -> -* Mark bundle as not supporting multiuse -< HTTP/1.1 200 OK -< Content-Type: application/json -< Date: Tue, 01 Feb 2022 11:39:55 GMT -< Content-Length: 68 -< -{"intercepted":true,"clientSide":true,"metadata":{"a":"b","b":"c"}} -* Connection #0 to host localhost left intact -``` diff --git a/docs/v2.5/reference/routing.md b/docs/v2.5/reference/routing.md deleted file mode 100644 index 061ba8fa..00000000 --- a/docs/v2.5/reference/routing.md +++ /dev/null @@ -1,69 +0,0 @@ -# Connection Routing - -## Outbound - -### DNS resolution -When requesting a connection to a host, the IP of that host must be determined. Telepresence provides DNS resolvers to help with this task. There are currently four types of resolvers but only one of them will be used on a workstation at any given time. Common for all of them is that they will propagate a selection of the host lookups to be performed in the cluster. The selection normally includes all names ending with `.cluster.local` or a currently mapped namespace but more entries can be added to the list using the `include-suffixes` option in the -[local DNS configuration](../config/#dns) - -#### Cluster side DNS lookups -The cluster side host lookup will be performed by the traffic-manager unless the client has an active intercept, in which case, the agent performing that intercept will be responsible for doing it. If the client has multiple intercepts, then all of them will be asked to perform the lookup, and the response to the client will contain the unique sum of IPs that they produce. It's therefore important to never have multiple intercepts that span more than one namespace[[1](#namespacelimit)]. The reason for asking all of them is that the workstation currently impersonates multiple containers, and it is not possible to determine on behalf of what container the lookup request is made. - -#### macOS resolver -This resolver hooks into the macOS DNS system by creating files under `/etc/resolver`. Those files correspond to some domain and contain the port number of the Telepresence resolver. Telepresence creates one such file for each of the currently mapped namespaces and `include-suffixes` option. The file `telepresence.local` contains a search path that is configured based on current intercepts so that single label names can be resolved correctly. - -#### Linux systemd-resolved resolver -This resolver registers itself as part of telepresence's [VIF](../tun-device) using `systemd-resolved` and uses the DBus API to configure domains and routes that corresponds to the current set of intercepts and namespaces. - -#### Linux overriding resolver -Linux systems that aren't configured with `systemd-resolved` will use this resolver. A Typical case is when running Telepresence [inside a docker container](../inside-container). During initialization, the resolver will first establish a _fallback_ connection to the IP passed as `--dns`, the one configured as `local-ip` in the [local DNS configuration](../config/#dns), or the primary `nameserver` registered in `/etc/resolv.conf`. 
It will then use iptables to actually override that IP so that requests to it instead end up in the overriding resolver, which unless it succeeds on its own, will use the _fallback_. - -#### Windows resolver -This resolver uses the DNS resolution capabilities of the [win-tun](https://www.wintun.net/) device in conjunction with [Win32_NetworkAdapterConfiguration SetDNSDomain](https://docs.microsoft.com/en-us/powershell/scripting/samples/performing-networking-tasks?view=powershell-7.2#assigning-the-dns-domain-for-a-network-adapter). - -#### DNS caching -The Telepresence DNS resolver often changes its configuration. This means that Telepresence must either flush the DNS caches on the local host, or ensure that DNS-records returned from the Telepresence resolver aren't cached (or cached for a very short time). All operating systems have different ways of flushing the DNS caches and even different versions of one system may have differences. Also, on some systems it is necessary to actually kill and restart processes to ensure a proper flush, which in turn may result in network instabilities. - -Starting with 2.4.7, Telepresence will no longer flush the host's DNS caches. Instead, all records will have a short Time To Live (TTL) so that such caches evict the entries quickly. This causes increased load on the Telepresence resolver (shorter TTL means more frequent queries) and to cater for that, telepresence now has an internal cache to minimize the number of DNS queries that it sends to the cluster. This cache is flushed as needed without causing instabilities. - -### Routing - -#### Subnets -The Telepresence `traffic-manager` service is responsible for discovering the cluster's service subnet and all subnets used by the pods. In order to do this, it needs permission to create a dummy service[[2](#servicesubnet)] in its own namespace, and the ability to list, get, and watch nodes and pods. Most clusters will expose the pod subnets as `podCIDR` in the `Node` while others, like Amazon EKS, don't. Telepresence will then fall back to deriving the subnets from the IPs of all pods. If you'd like to choose a specific method for discovering subnets, or want to provide the list yourself, you can use the `podCIDRStrategy` configuration value in the [helm](../../install/helm) chart to do that. - -The complete set of subnets that the [VIF](../tun-device) will be configured with is dynamic and may change during a connection's life cycle as new nodes arrive or disappear from the cluster. The set consists of what that the traffic-manager finds in the cluster, and the subnets configured using the [also-proxy](../config#alsoproxy) configuration option. Telepresence will remove subnets that are equal to, or completely covered by, other subnets. - -#### Connection origin -A request to connect to an IP-address that belongs to one of the subnets of the [VIF](../tun-device) will cause a connection request to be made in the cluster. As with host name lookups, the request will originate from the traffic-manager unless the client has ongoing intercepts. If it does, one of the intercepted pods will be chosen, and the request will instead originate from that pod. This is a best-effort approach. Telepresence only knows that the request originated from the workstation. It cannot know that it is intended to originate from a specific pod when multiple intercepts are active. - -A `--local-only` intercept will not have any effect on the connection origin because there is no pod from which the connection can originate. 
The intercept must be made on a workload that has been deployed in the cluster if there's a requirement for correct connection origin. - -There are multiple reasons for doing this. One is that it is important that the request originates from the correct namespace. Example: - -```bash -curl some-host -``` -results in a http request with header `Host: some-host`. Now, if a service-mesh like Istio performs header based routing, then it will fail to find that host unless the request originates from the same namespace as the host resides in. Another reason is that the configuration of a service mesh can contain very strict rules. If the request then originates from the wrong pod, it will be denied. Only one intercept at a time can be used if there is a need to ensure that the chosen pod is exactly right. - -### Recursion detection -It is common that clusters used in development, such as Minikube, Minishift or k3s, run on the same host as the Telepresence client, often in a Docker container. Such clusters may have access to host network, which means that both DNS and L4 routing may be subjected to recursion. - -#### DNS recursion -When a local cluster's DNS-resolver fails to resolve a hostname, it may fall back to querying the local host network. This means that the Telepresence resolver will be asked to resolve a query that was issued from the cluster. Telepresence must check if such a query is recursive because there is a chance that it actually originated from the Telepresence DNS resolver and was dispatched to the `traffic-manager`, or a `traffic-agent`. - -Telepresence handles this by sending one initial DNS-query to resolve the hostname "tel2-recursion-check.kube-system". If the cluster runs locally, and has access to the local host's network, then that query will recurse back into the Telepresence resolver. Telepresence remembers this and alters its own behavior so that queries that are believed to be recursions are detected and respond with an NXNAME record. Telepresence performs this solution to the best of its ability, but may not be completely accurate in all situations. There's a chance that the DNS-resolver will yield a false negative for the second query if the same hostname is queried more than once in rapid succession, that is when the second query is made before the first query has received a response from the cluster. - -#### Connect recursion -A cluster running locally may dispatch connection attempts to non-existing host:port combinations to the host network. This means that they may reach the Telepresence [VIF](../tun-device). Endless recursions occur if the VIF simply dispatches such attempts on to the cluster. - -The telepresence client handles this by serializing all connection attempts to one specific IP:PORT, trapping all subsequent attempts to connect to that IP:PORT until the first attempt has completed. If the first attempt was deemed a success, then the currently trapped attempts are allowed to proceed. If the first attempt failed, then the currently trapped attempts fail. - -## Inbound - -The traffic-manager and traffic-agent are mutually responsible for setting up the necessary connection to the workstation when an intercept becomes active. In versions prior to 2.3.2, this would be accomplished by the traffic-manager creating a port dynamically that it would pass to the traffic-agent. The traffic-agent would then forward the intercepted connection to that port, and the traffic-manager would forward it to the workstation. 
This led to problems when integrating with service meshes like Istio since those dynamic ports needed to be configured. It also imposed an undesired requirement to be able to use mTLS between the traffic-manager and traffic-agent. - -In 2.3.2, this changed so that the traffic-agent instead creates a tunnel to the traffic-manager using the already existing gRPC API connection. The traffic-manager then forwards that using another tunnel to the workstation. This is completely invisible to other service meshes and is therefore much easier to configure. - -##### Footnotes: -

1: A future version of Telepresence will not allow concurrent intercepts that span multiple namespaces.

-

2: The error message from an attempt to create a service in a bad subnet contains the service subnet. The trick of creating a dummy service is currently the only way to get Kubernetes to expose that subnet.
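As an illustration (not something the document itself prescribes), the same kind of error can be provoked manually with kubectl; the exact wording varies between Kubernetes versions, and the CIDR shown here is just a common default, but the rejection message reveals the cluster's service subnet:

```console
$ kubectl create service clusterip dummy --clusterip=1.2.3.4 --tcp=80:80
The Service "dummy" is invalid: spec.clusterIPs: Invalid value: []string{"1.2.3.4"}: failed to allocate IP 1.2.3.4: provided IP is not in the valid range. The range of valid IPs is 10.96.0.0/12
```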

diff --git a/docs/v2.5/reference/tun-device.md b/docs/v2.5/reference/tun-device.md deleted file mode 100644 index 4410f6f3..00000000 --- a/docs/v2.5/reference/tun-device.md +++ /dev/null @@ -1,27 +0,0 @@ -# Networking through Virtual Network Interface - -The Telepresence daemon process creates a Virtual Network Interface (VIF) when Telepresence connects to the cluster. The VIF ensures that the cluster's subnets are available to the workstation. It also intercepts DNS requests and forwards them to the traffic-manager which in turn forwards them to intercepted agents, if any, or performs a host lookup by itself. - -### TUN-Device -The VIF is a TUN-device, which means that it communicates with the workstation in terms of L3 IP-packets. The router will recognize UDP and TCP packets and tunnel their payload to the traffic-manager via its encrypted gRPC API. The traffic-manager will then establish corresponding connections in the cluster. All protocol negotiation takes place in the client because the VIF takes care of the L3 to L4 translation (i.e. the tunnel is L4, not L3). - -## Gains when using the VIF - -### Both TCP and UDP -The TUN-device is capable of routing both TCP and UDP for outbound traffic. Earlier versions of Telepresence would only allow TCP. Future enhancements might be to also route inbound UDP, and perhaps a selection of ICMP packages (to allow for things like `ping`). - -### No SSH required - -The VIF approach is somewhat similar to using `sshuttle` but without -any requirements for extra software, configuration or connections. -Using the VIF means that only one single connection needs to be -forwarded through the Kubernetes apiserver (à la `kubectl -port-forward`), using only one single port. There is no need for -`ssh` in the client nor for `sshd` in the traffic-manager. This also -means that the traffic-manager container can run as the default user. - -#### sshfs without ssh encryption -When a POD is intercepted, and its volumes are mounted on the local machine, this mount is performed by [sshfs](https://github.com/libfuse/sshfs). Telepresence will run `sshfs -o slave` which means that instead of using `ssh` to establish an encrypted communication to an `sshd`, which in turn terminates the encryption and forwards to `sftp`, the `sshfs` will talk `sftp` directly on its `stdin/stdout` pair. Telepresence tunnels that directly to an `sftp` in the agent using its already encrypted gRPC API. As a result, no `sshd` is needed in client nor in the traffic-agent, and the traffic-agent container can run as the default user. - -### No Firewall rules -With the VIF in place, there's no longer any need to tamper with firewalls in order to establish IP routes. The VIF makes the cluster subnets available during connect, and the kernel will perform the routing automatically. When the session ends, the kernel is also responsible for cleaning up. diff --git a/docs/v2.5/reference/volume.md b/docs/v2.5/reference/volume.md deleted file mode 100644 index 82df9caf..00000000 --- a/docs/v2.5/reference/volume.md +++ /dev/null @@ -1,36 +0,0 @@ -# Volume mounts - -import Alert from '@material-ui/lab/Alert'; - -Telepresence supports locally mounting of volumes that are mounted to your Pods. You can specify a command to run when starting the intercept, this could be a subshell or local server such as Python or Node. 
- -``` -telepresence intercept --port --mount=/tmp/ -- /bin/bash -``` - -In this case, Telepresence creates the intercept, mounts the Pod's volumes locally to `/tmp`, and starts a Bash subshell. - -Telepresence can set a random mount point for you by using `--mount=true` instead; you can then find the mount point in the output of `telepresence list` or in the `$TELEPRESENCE_ROOT` variable. - -``` -$ telepresence intercept --port --mount=true -- /bin/bash -Using Deployment -intercepted - Intercept name : - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1: - Volume Mount Point: /var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 - Intercepting : all TCP connections - -bash-3.2$ echo $TELEPRESENCE_ROOT -/var/folders/cp/2r22shfd50d9ymgrw14fd23r0000gp/T/telfs-988349784 -``` - ---mount=true is the default if a mount option is not specified; use --mount=false to disable mounting volumes. - -With either method, the code you run locally, whether from the subshell or from the intercept command, will need to prepend the `$TELEPRESENCE_ROOT` environment variable to file paths in order to utilize the mounted volumes. - -For example, Kubernetes mounts secrets to `/var/run/secrets/kubernetes.io` (even if no `mountPoint` for it exists in the Pod spec). Once mounted, to access these you would need to change your code to use `$TELEPRESENCE_ROOT/var/run/secrets/kubernetes.io`. - -If using --mount=true without a command, you can use either of the environment variable flags to retrieve the variable. diff --git a/docs/v2.5/reference/vpn.md deleted file mode 100644 index ce7ff9c6..00000000 --- a/docs/v2.5/reference/vpn.md +++ /dev/null @@ -1,157 +0,0 @@ -
- -# Telepresence and VPNs - -## The test-vpn command - -You can make use of the `telepresence test-vpn` command to diagnose issues -with your VPN setup. -This guides you through a series of steps to figure out if there are -conflicts between your VPN configuration and telepresence. - -### Prerequisites - -Before running `telepresence test-vpn` you should ensure that your VPN is -in split-tunnel mode. -This means that only traffic that _must_ pass through the VPN is directed -through it; otherwise, the test results may be inaccurate. - -You may need to configure this on both the client and server sides. -Client-side, taking the Tunnelblick client as an example, you must ensure that -the `Route all IPv4 traffic through the VPN` tickbox is not enabled: - - - -Server-side, taking AWS' ClientVPN as an example, you simply have to enable -split-tunnel mode: - - - -In AWS, this setting can be toggled without reprovisioning the VPN. Other cloud providers may work differently. - -### Testing the VPN configuration - -To run it, enter: - -```console -$ telepresence test-vpn -``` - -The test-vpn tool begins by asking you to disconnect from your VPN; ensure you are disconnected then -press enter: - -``` -Telepresence Root Daemon is already stopped -Telepresence User Daemon is already stopped -Please disconnect from your VPN now and hit enter once you're disconnected... -``` - -Once it's gathered information about your network configuration without an active connection, -it will ask you to connect to the VPN: - -``` -Please connect to your VPN now and hit enter once you're connected... -``` - -It will then connect to the cluster: - - -``` -Launching Telepresence Root Daemon -Launching Telepresence User Daemon -Connected to context arn:aws:eks:us-east-1:914373874199:cluster/josec-tp-test-vpn-cluster (https://07C63820C58A0426296DAEFC73AED10C.gr7.us-east-1.eks.amazonaws.com) -Telepresence Root Daemon quitting... done -Telepresence User Daemon quitting... done -``` - -And show you the results of the test: - -``` ----------- Test Results: -❌ pod subnet 10.0.0.0/19 is masking VPN-routed CIDR 10.0.0.0/16. This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/19 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 10.0.0.0/16 are placed in the never-proxy list -✅ svc subnet 10.19.0.0/16 is clear of VPN - -Please see https://www.telepresence.io/docs/latest/reference/vpn for more info on these corrective actions, as well as examples - -Still having issues? Please create a new github issue at https://github.com/telepresenceio/telepresence/issues/new?template=Bug_report.md - Please make sure to add the following to your issue: - * Run `telepresence loglevel debug`, try to connect, then run `telepresence gather_logs`. It will produce a zipfile that you should attach to the issue. - * Which VPN client are you using? - * Which VPN server are you using? - * How is your VPN pushing DNS configuration? It may be useful to add the contents of /etc/resolv.conf -``` - -#### Interpreting test results - -##### Case 1: VPN masked by cluster - -In an instance where the VPN is masked by the cluster, the test-vpn tool informs you that a pod or service subnet is masking a CIDR that the VPN -routes: - -``` -❌ pod subnet 10.0.0.0/19 is masking VPN-routed CIDR 10.0.0.0/16. 
This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/19 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 10.0.0.0/16 are placed in the never-proxy list -``` - -This means that all VPN hosts within `10.0.0.0/19` will be rendered inaccessible while -telepresence is connected. - -The ideal resolution in this case is to move the pods to a different subnet. This is possible, -for example, in Amazon EKS by configuring a [new CIDR range](https://aws.amazon.com/premiumsupport/knowledge-center/eks-multiple-cidr-ranges/) for the pods. -In this case, configuring the pods to be located in `10.1.0.0/19` clears the VPN and allows you -to reach hosts inside the VPC's `10.0.0.0/19` - -However, it is not always possible to move the pods to a different subnet. -In these cases, you should use the [never-proxy](../config#neverproxy) configuration to prevent certain -hosts from being masked. -This might be particularly important for DNS resolution. In an AWS ClientVPN VPN it is often -customary to set the `.2` host as a DNS server (e.g. `10.0.0.2` in this case): - - - -If this is the case for your VPN, you should place the DNS server in the never-proxy list for your -cluster. In your kubeconfig file, add a `telepresence` extension like so: - -```yaml -- cluster: - server: https://127.0.0.1 - extensions: - - name: telepresence.io - extension: - never-proxy: - - 10.0.0.2/32 -``` - -##### Case 2: Cluster masked by VPN - -In an instance where the Cluster is masked by the VPN, the test-vpn tool informs you that a pod or service subnet is being masked by a CIDR -that the VPN routes: - -``` -❌ pod subnet 10.0.0.0/8 being masked by VPN-routed CIDR 10.0.0.0/16. This usually means that Telepresence will not be able to connect to your cluster. To resolve: - * Move pod subnet 10.0.0.0/8 to a subnet not mapped by the VPN - * If this is not possible, consider shrinking the mask of the 10.0.0.0/16 CIDR (e.g. from /16 to /8), or disabling split-tunneling -``` - -Typically this means that pods within `10.0.0.0/8` are not accessible while the VPN is -connected. - -As with the first case, the ideal resolution is to move the pods away, but this may not always -be possible. In that case, your best bet is to attempt to shrink the VPN's CIDR -(that is, make it route more hosts) to make Telepresence's routes win by virtue of specificity. -One easy way to do this may be by disabling split tunneling (see the [prerequisites](#prerequisites) -section for more on split-tunneling). - -Note that once you fix this, you may find yourself landing again in [Case 1](#case-1-vpn-masked-by-cluster), and may need -to use never-proxy rules to whitelist hosts in the VPN: - -``` -❌ pod subnet 10.0.0.0/8 is masking VPN-routed CIDR 0.0.0.0/1. This usually means Telepresence will be able to connect to your cluster, but hosts on your VPN may be inaccessible while telepresence is connected; to resolve: - * Move pod subnet 10.0.0.0/8 to a subnet not mapped by the VPN - * If this is not possible, ensure that any hosts in CIDR 0.0.0.0/1 are placed in the never-proxy list -``` -
diff --git a/docs/v2.5/release-notes/no-ssh.png b/docs/v2.5/release-notes/no-ssh.png deleted file mode 100644 index 025f20ab..00000000 Binary files a/docs/v2.5/release-notes/no-ssh.png and /dev/null differ diff --git a/docs/v2.5/release-notes/run-tp-in-docker.png b/docs/v2.5/release-notes/run-tp-in-docker.png deleted file mode 100644 index 53b66a9b..00000000 Binary files a/docs/v2.5/release-notes/run-tp-in-docker.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.2.png b/docs/v2.5/release-notes/telepresence-2.2.png deleted file mode 100644 index 43abc7e8..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.2.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.0-homebrew.png b/docs/v2.5/release-notes/telepresence-2.3.0-homebrew.png deleted file mode 100644 index e203a975..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.0-homebrew.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.0-loglevels.png b/docs/v2.5/release-notes/telepresence-2.3.0-loglevels.png deleted file mode 100644 index 3d628c54..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.0-loglevels.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.1-alsoProxy.png b/docs/v2.5/release-notes/telepresence-2.3.1-alsoProxy.png deleted file mode 100644 index 4052b927..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.1-alsoProxy.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.1-brew.png b/docs/v2.5/release-notes/telepresence-2.3.1-brew.png deleted file mode 100644 index 2af42490..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.1-brew.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.1-dns.png b/docs/v2.5/release-notes/telepresence-2.3.1-dns.png deleted file mode 100644 index c6335e7a..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.1-dns.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.1-inject.png b/docs/v2.5/release-notes/telepresence-2.3.1-inject.png deleted file mode 100644 index aea1003e..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.1-inject.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.1-large-file-transfer.png b/docs/v2.5/release-notes/telepresence-2.3.1-large-file-transfer.png deleted file mode 100644 index 48ceb381..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.1-large-file-transfer.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.1-trafficmanagerconnect.png b/docs/v2.5/release-notes/telepresence-2.3.1-trafficmanagerconnect.png deleted file mode 100644 index 78128c17..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.1-trafficmanagerconnect.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.2-subnets.png b/docs/v2.5/release-notes/telepresence-2.3.2-subnets.png deleted file mode 100644 index 778c722a..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.2-subnets.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.2-svcport-annotation.png b/docs/v2.5/release-notes/telepresence-2.3.2-svcport-annotation.png deleted file mode 100644 index 1e1e9240..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.2-svcport-annotation.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.3-helm.png 
b/docs/v2.5/release-notes/telepresence-2.3.3-helm.png deleted file mode 100644 index 7b81480a..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.3-helm.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.3-namespace-config.png b/docs/v2.5/release-notes/telepresence-2.3.3-namespace-config.png deleted file mode 100644 index 7864d3a3..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.3-namespace-config.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.3-to-pod.png b/docs/v2.5/release-notes/telepresence-2.3.3-to-pod.png deleted file mode 100644 index aa7be3f6..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.3-to-pod.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.4-improved-error.png b/docs/v2.5/release-notes/telepresence-2.3.4-improved-error.png deleted file mode 100644 index fa8a1298..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.4-improved-error.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.4-ip-error.png b/docs/v2.5/release-notes/telepresence-2.3.4-ip-error.png deleted file mode 100644 index 1d37380c..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.4-ip-error.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.5-agent-config.png b/docs/v2.5/release-notes/telepresence-2.3.5-agent-config.png deleted file mode 100644 index 67d6d3e8..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.5-agent-config.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.5-grpc-max-receive-size.png b/docs/v2.5/release-notes/telepresence-2.3.5-grpc-max-receive-size.png deleted file mode 100644 index 32939f9d..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.5-grpc-max-receive-size.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.5-skipLogin.png b/docs/v2.5/release-notes/telepresence-2.3.5-skipLogin.png deleted file mode 100644 index bf79c191..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.5-skipLogin.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png b/docs/v2.5/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png deleted file mode 100644 index d29a05ad..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.5-traffic-manager-namespaces.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.7-keydesc.png b/docs/v2.5/release-notes/telepresence-2.3.7-keydesc.png deleted file mode 100644 index 9bffe5cc..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.7-keydesc.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.3.7-newkey.png b/docs/v2.5/release-notes/telepresence-2.3.7-newkey.png deleted file mode 100644 index c7d47c42..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.3.7-newkey.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.0-cloud-messages.png b/docs/v2.5/release-notes/telepresence-2.4.0-cloud-messages.png deleted file mode 100644 index ffd045ae..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.0-cloud-messages.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.0-windows.png b/docs/v2.5/release-notes/telepresence-2.4.0-windows.png deleted file mode 100644 index d27ba254..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.0-windows.png and 
/dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.1-systema-vars.png b/docs/v2.5/release-notes/telepresence-2.4.1-systema-vars.png deleted file mode 100644 index c098b439..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.1-systema-vars.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.4-gather-logs.png b/docs/v2.5/release-notes/telepresence-2.4.4-gather-logs.png deleted file mode 100644 index 7db54173..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.4-gather-logs.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.5-logs-anonymize.png b/docs/v2.5/release-notes/telepresence-2.4.5-logs-anonymize.png deleted file mode 100644 index edd01fde..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.5-logs-anonymize.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.5-pod-yaml.png b/docs/v2.5/release-notes/telepresence-2.4.5-pod-yaml.png deleted file mode 100644 index 3f565c4f..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.5-pod-yaml.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.5-preview-url-questions.png b/docs/v2.5/release-notes/telepresence-2.4.5-preview-url-questions.png deleted file mode 100644 index 1823aaa1..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.5-preview-url-questions.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.6-help-text.png b/docs/v2.5/release-notes/telepresence-2.4.6-help-text.png deleted file mode 100644 index aab9178a..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.6-help-text.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.8-health-check.png b/docs/v2.5/release-notes/telepresence-2.4.8-health-check.png deleted file mode 100644 index e10a0b47..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.8-health-check.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.4.8-vpn.png b/docs/v2.5/release-notes/telepresence-2.4.8-vpn.png deleted file mode 100644 index fbb21588..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.4.8-vpn.png and /dev/null differ diff --git a/docs/v2.5/release-notes/telepresence-2.5.0-pro-daemon.png b/docs/v2.5/release-notes/telepresence-2.5.0-pro-daemon.png deleted file mode 100644 index 5b82fc76..00000000 Binary files a/docs/v2.5/release-notes/telepresence-2.5.0-pro-daemon.png and /dev/null differ diff --git a/docs/v2.5/release-notes/tunnel.jpg b/docs/v2.5/release-notes/tunnel.jpg deleted file mode 100644 index 59a0397e..00000000 Binary files a/docs/v2.5/release-notes/tunnel.jpg and /dev/null differ diff --git a/docs/v2.5/releaseNotes.yml b/docs/v2.5/releaseNotes.yml deleted file mode 100644 index 373be545..00000000 --- a/docs/v2.5/releaseNotes.yml +++ /dev/null @@ -1,1269 +0,0 @@ -# This file should be placed in the folder for the version of the -# product that's meant to be documented. A `/release-notes` page will -# be automatically generated and populated at build time. -# -# Note that an entry needs to be added to the `doc-links.yml` file in -# order to surface the release notes in the table of contents. -# -# The YAML in this file should contain: -# -# changelog: An (optional) URL to the CHANGELOG for the product. -# items: An array of releases with the following attributes: -# - version: The (optional) version number of the release, if applicable. 
-# - date: The date of the release in the format YYYY-MM-DD. -# - notes: An array of noteworthy changes included in the release, each having the following attributes: -# - type: The type of change, one of `bugfix`, `feature`, `security` or `change`. -# - title: A short title of the noteworthy change. -# - body: >- -# Two or three sentences describing the change and why it -# is noteworthy. This is HTML, not plain text or -# markdown. It is handy to use YAML's ">-" feature to -# allow line-wrapping. -# - image: >- -# The URL of an image that visually represents the -# noteworthy change. This path is relative to the -# `release-notes` directory; if this file is -# `FOO/releaseNotes.yml`, then the image paths are -# relative to `FOO/release-notes/`. -# - docs: The path to the documentation page where additional information can be found. -# - href: A path from the root to a resource on the getambassador website, takes precedence over a docs link. - -docTitle: Telepresence Release Notes -docDescription: >- - Release notes for Telepresence by Ambassador Labs, a CNCF project - that enables developers to iterate rapidly on Kubernetes - microservices by arming them with infinite-scale development - environments, access to instantaneous feedback loops, and highly - customizable development environments. - -changelog: https://github.com/telepresenceio/telepresence/blob/$branch$/CHANGELOG.md - -items: - - version: 2.5.8 - date: "2022-04-27" - notes: - - type: bugfix - title: Folder creation on `telepresence login` - body: >- - Fixed a bug where the telepresence config folder would not be created if the user ran `telepresence login` before other commands. - - version: 2.5.7 - date: "2022-04-25" - notes: - - type: change - title: RBAC requirements - body: >- - A namespaced traffic-manager will no longer require cluster wide RBAC. Only Roles and RoleBindings are now used. - - type: bugfix - title: Windows DNS - body: >- - The DNS recursion detector didn't work correctly on Windows, resulting in sporadic failures to resolve names that were resolved correctly at other times. - - type: bugfix - title: Session TTL and Reconnect - body: >- - A telepresence session will now last for 24 hours after the user's last connectivity. If a session expires, the connector will automatically try to reconnect. - - version: 2.5.6 - date: "2022-04-18" - notes: - - type: change - title: Less Watchers - body: >- - Telepresence agents watcher will now only watch namespaces that the user has accessed since the last `connect`. - - type: bugfix - title: More Efficient `gather-logs` - body: >- - The `gather-logs` command will no longer send any logs through `gRPC`. - - version: 2.5.5 - date: "2022-04-08" - notes: - - type: change - title: Traffic Manager Permissions - body: >- - The traffic-manager now requires permissions to read pods across namespaces even if installed with limited permissions - - type: bugfix - title: Linux DNS Cache - body: >- - The DNS resolver used on Linux with systemd-resolved now flushes the cache when the search path changes. - - type: bugfix - title: Automatic Connect Sync - body: >- - The `telepresence list` command will produce a correct listing even when not preceded by a `telepresence connect`. - - type: bugfix - title: Disconnect Reconnect Stability - body: >- - The root daemon will no longer get into a bad state when a disconnect is rapidly followed by a new connect. 
- - type: bugfix - title: Limit Watched Namespaces - body: >- - The client will now only watch agents from accessible namespaces, and is also constrained to namespaces explicitly mapped using the `connect` command's `--mapped-namespaces` flag. - - type: bugfix - title: Limit Namespaces used in `gather-logs` - body: >- - The `gather-logs` command will only gather traffic-agent logs from accessible namespaces, and is also constrained to namespaces explicitly mapped using the `connect` command's `--mapped-namespaces` flag. - - version: 2.5.4 - date: "2022-03-29" - notes: - - type: bugfix - title: Linux DNS Concurrency - body: >- - The DNS fallback resolver on Linux now correctly handles concurrent requests without timing them out - - type: bugfix - title: Non-Functional Flag - body: >- - The ingress-l5 flag will no longer be forcefully set to equal the --ingress-host flag - - type: bugfix - title: Automatically Remove Failed Intercepts - body: >- - Intercepts that fail to create are now consistently removed to prevent non-working dangling intercepts from sticking around. - - type: bugfix - title: Agent UID - body: >- - Agent container is no longer sensitive to a random UID or an UID imposed by a SecurityContext. - - type: bugfix - title: Gather-Logs Output Filepath - body: >- - Removed a bad concatenation that corrupted the output path of `telepresence gather-logs`. - - type: change - title: Remove Unnecessary Error Advice - body: >- - An advice to "see logs for details" is no longer printed when the argument count is incorrect in a CLI command. - - type: bugfix - title: Garbage Collection - body: >- - Client and agent sessions no longer leaves dangling waiters in the traffic-manager when they depart. - - type: bugfix - title: Limit Gathered Logs - body: >- - The client's gather logs command and agent watcher will now respect the configured grpc.maxReceiveSize - - type: change - title: In-Cluster Checks - body: >- - The TUN device will no longer route pod or service subnets if it is running in a machine that's already connected to the cluster - - type: change - title: Expanded Status Command - body: >- - The status command includes the install id, user id, account id, and user email in its result, and can print output as JSON - - type: change - title: List Command Shows All Intercepts - body: >- - The list command, when used with the `--intercepts` flag, will list the users intercepts from all namespaces - - version: 2.5.3 - date: "2022-02-25" - notes: - - type: bugfix - title: TCP Connectivity - body: >- - Fixed bug in the TCP stack causing timeouts after repeated connects to the same address - - type: feature - title: Linux Binaries - body: >- - Client-side binaries for the arm64 architecture are now available for linux - - version: 2.5.2 - date: "2022-02-23" - notes: - - type: bugfix - title: DNS server bugfix - body: >- - Fixed a bug where Telepresence would use the last server in resolv.conf - - version: 2.5.1 - date: "2022-02-19" - notes: - - type: bugfix - title: Fix GKE auth issue - body: >- - Fixed a bug where using a GKE cluster would error with: No Auth Provider found for name "gcp" - - version: 2.5.0 - date: "2022-02-18" - notes: - - type: feature - title: Intercept specific endpoints - body: >- - The flags --http-path-equal, --http-path-prefix, and --http-path-regex can can be used in - addition to the --http-match flag to filter personal intercepts by the request URL path - docs: concepts/intercepts#intercepting-a-specific-endpoint - - type: feature - title: Intercept metadata - 
body: >- - The flag --http-meta can be used to declare metadata key value pairs that will be returned by the Telepresence rest - API endpoint /intercept-info - docs: reference/restapi#intercept-info - - type: change - title: Client RBAC watch - body: >- - The verb "watch" was added to the set of required verbs when accessing services and workloads for the client RBAC - ClusterRole - docs: reference/rbac - - type: change - title: Dropped backward compatibility with versions <=2.4.4 - body: >- - Telepresence is no longer backward compatible with versions 2.4.4 or older because the deprecated multiplexing tunnel - functionality was removed. - - type: change - title: No global networking flags - body: >- - The global networking flags are no longer used and using them will render a deprecation warning unless they are supported by the - command. The subcommands that support networking flags are connect, current-cluster-id, - and genyaml. - - type: bugfix - title: Output of status command - body: >- - The also-proxy and never-proxy subnets are now displayed correctly when using the - telepresence status command. - - type: bugfix - title: SETENV sudo privilege no longer needed - body: >- - Telepresence longer requires SETENV privileges when starting the root daemon. - - type: bugfix - title: Network device names containing dash - body: >- - Telepresence will now parse device names containing dashes correctly when determining routes that it should never block. - - type: bugfix - title: Linux uses cluster.local as domain instead of search - body: >- - The cluster domain (typically "cluster.local") is no longer added to the DNS search on Linux using - systemd-resolved. Instead, it is added as a domain so that names ending with it are routed - to the DNS server. - - version: 2.4.11 - date: "2022-02-10" - notes: - - type: change - title: Add additional logging to troubleshoot intermittent issues with intercepts - body: >- - We've noticed some issues with intercepts in v2.4.10, so we are releasing a version - with enhanced logging to help debug and fix the issue. - - version: 2.4.10 - date: "2022-01-13" - notes: - - type: feature - title: Application Protocol Strategy - body: >- - The strategy used when selecting the application protocol for personal intercepts can now be configured using - the intercept.appProtocolStrategy in the config.yml file. - docs: reference/config/#intercept - image: telepresence-2.4.10-intercept-config.png - - type: feature - title: Helm value for the Application Protocol Strategy - body: >- - The strategy when selecting the application protocol for personal intercepts in agents injected by the - mutating webhook can now be configured using the agentInjector.appProtocolStrategy in the Helm chart. - docs: install/helm - - type: feature - title: New --http-plaintext option - body: >- - The flag --http-plaintext can be used to ensure that an intercept uses plaintext http or grpc when - communicating with the workstation process. - docs: reference/intercepts/#tls - - type: feature - title: Configure the default intercept port - body: >- - The port used by default in the telepresence intercept command (8080), can now be changed by setting - the intercept.defaultPort in the config.yml file. - docs: reference/config/#intercept - - type: change - title: Telepresence CI now uses Github Actions - body: >- - Telepresence now uses Github Actions for doing unit and integration testing. 
It is - now easier for contributors to run tests on PRs since maintainers can add an - "ok to test" label to PRs (including from forks) to run integration tests. - docs: https://github.com/telepresenceio/telepresence/actions - image: telepresence-2.4.10-actions.png - - type: bugfix - title: Check conditions before asking questions - body: >- - User will not be asked to log in or add ingress information when creating an intercept until a check has been - made that the intercept is possible. - docs: reference/intercepts/ - - type: bugfix - title: Fix invalid log statement - body: >- - Telepresence will no longer log invalid: "unhandled connection control message: code DIAL_OK" errors. - - type: bugfix - title: Log errors from sshfs/sftp - body: >- - Output to stderr from the traffic-agent's sftp and the client's sshfs processes - are properly logged as errors. - - type: bugfix - title: Don't use Windows path separators in workload pod template - body: >- - Auto installer will no longer not emit backslash separators for the /tel-app-mounts paths in the - traffic-agent container spec when running on Windows. - - version: 2.4.9 - date: "2021-12-09" - notes: - - type: bugfix - title: Helm upgrade nil pointer error - body: >- - A helm upgrade using the --reuse-values flag no longer fails on a "nil pointer" error caused by a nil - telpresenceAPI value. - docs: install/helm#upgrading-the-traffic-manager - - version: 2.4.8 - date: "2021-12-03" - notes: - - type: feature - title: VPN diagnostics tool - body: >- - There is a new subcommand, test-vpn, that can be used to diagnose connectivity issues with a VPN. - See the VPN docs for more information on how to use it. - docs: reference/vpn - image: telepresence-2.4.8-vpn.png - - - type: feature - title: RESTful API service - body: >- - A RESTful service was added to Telepresence, both locally to the client and to the traffic-agent to - help determine if messages with a set of headers should be consumed or not from a message queue where the - intercept headers are added to the messages. - docs: reference/restapi - image: telepresence-2.4.8-health-check.png - - - type: change - title: TELEPRESENCE_LOGIN_CLIENT_ID env variable no longer used - body: >- - You could previously configure this value, but there was no reason to change it, so the value - was removed. - - - type: bugfix - title: Tunneled network connections behave more like ordinary TCP connections. - body: >- - When using Telepresence with an external cloud provider for extensions, those tunneled - connections now behave more like TCP connections, especially when it comes to timeouts. - We've also added increased testing around these types of connections. - - version: 2.4.7 - date: "2021-11-24" - notes: - - type: feature - title: Injector service-name annotation - body: >- - The agent injector now supports a new annotation, telepresence.getambassador.io/inject-service-name, that can be used to set the name of the service to be intercepted. - This will help disambiguate which service to intercept for when a workload is exposed by multiple services, such as can happen with Argo Rollouts - docs: reference/cluster-config#service-name-annotation - - type: feature - title: Skip the Ingress Dialogue - body: >- - You can now skip the ingress dialogue by setting the ingress parameters in the corresponding flags. 
- docs: reference/intercepts#skipping-the-ingress-dialogue - - type: feature - title: Never proxy subnets - body: >- - The kubeconfig extensions now support a never-proxy argument, - analogous to also-proxy, that defines a set of subnets that - will never be proxied via telepresence. - docs: reference/config#neverproxy - - type: change - title: Daemon versions check - body: >- - Telepresence now checks the versions of the client and the daemons and asks the user to quit and restart if they don't match. - - type: change - title: No explicit DNS flushes - body: >- - Telepresence DNS now uses a very short TTL instead of explicitly flushing DNS by killing the mDNSResponder or doing resolvectl flush-caches - docs: reference/routing#dns-caching - - type: bugfix - title: Legacy flags now work with global flags - body: >- - Legacy flags such as `--swap-deployment` can now be used together with global flags. - - type: bugfix - title: Outbound connection closing - body: >- - Outbound connections are now properly closed when the peer closes. - - type: bugfix - title: Prevent DNS recursion - body: >- - The DNS-resolver will trap recursive resolution attempts (may happen when the cluster runs in a docker-container on the client). - docs: reference/routing#dns-recursion - - type: bugfix - title: Prevent network recursion - body: >- - The TUN-device will trap failed connection attempts that results in recursive calls back into the TUN-device (may happen when the - cluster runs in a docker-container on the client). - docs: reference/routing#connect-recursion - - type: bugfix - title: Traffic Manager deadlock fix - body: >- - The Traffic Manager no longer runs a risk of entering a deadlock when a new Traffic agent arrives. - - type: bugfix - title: webhookRegistry config propagation - body: >- - The configured webhookRegistry is now propagated to the webhook installer even if no webhookAgentImage has been set. - docs: reference/config#images - - type: bugfix - title: Login refreshes expired tokens - body: >- - When a user's token has expired, telepresence login - will prompt the user to log in again to get a new token. Previously, - the user had to telepresence quit and telepresence logout - to get a new token. - docs: https://github.com/telepresenceio/telepresence/issues/2062 - - version: 2.4.6 - date: "2021-11-02" - notes: - - type: feature - title: Manually injecting Traffic Agent - body: >- - Telepresence now supports manually injecting the traffic-agent YAML into workload manifests. - Use the genyaml command to create the sidecar YAML, then add the telepresence.getambassador.io/manually-injected: "true" annotation to your pods to allow Telepresence to intercept them. - docs: reference/intercepts/manual-agent - - - type: feature - title: Telepresence CLI released for Apple silicon - body: >- - Telepresence is now built and released for Apple silicon. - docs: install/?os=macos - - - type: change - title: Telepresence help text now links to telepresence.io - body: >- - We now include a link to our documentation when you run telepresence --help. This will make it easier - for users to find this page whether they acquire Telepresence through Brew or some other mechanism. 
- image: telepresence-2.4.6-help-text.png - - - type: bugfix - title: Fixed bug when API server is inside CIDR range of pods/services - body: >- - If the API server for your kubernetes cluster had an IP that fell within the - subnet generated from pods/services in a kubernetes cluster, it would proxy traffic - to the API server which would result in hanging or a failed connection. We now ensure - that the API server is explicitly not proxied. - - version: 2.4.5 - date: "2021-10-15" - notes: - - type: feature - title: Get pod yaml with gather-logs command - body: >- - Adding the flag --get-pod-yaml to your request will get the - pod yaml manifest for all kubernetes components you are getting logs for - ( traffic-manager and/or pods containing a - traffic-agent container). This flag is set to false - by default. - docs: reference/client - image: telepresence-2.4.5-pod-yaml.png - - - type: feature - title: Anonymize pod name + namespace when using gather-logs command - body: >- - Adding the flag --anonymize to your command will - anonymize your pod names + namespaces in the output file. We replace the - sensitive names with simple names (e.g. pod-1, namespace-2) to maintain - relationships between the objects without exposing the real names of your - objects. This flag is set to false by default. - docs: reference/client - image: telepresence-2.4.5-logs-anonymize.png - - - type: feature - title: Added context and defaults to ingress questions when creating a preview URL - body: >- - Previously, we referred to OSI model layers when asking these questions, but this - terminology is not commonly used. The questions now provide a clearer context for the user, along with a default answer as an example. - docs: howtos/preview-urls - image: telepresence-2.4.5-preview-url-questions.png - - - type: feature - title: Support for intercepting headless services - body: >- - Intercepting headless services is now officially supported. You can request a - headless service on whatever port it exposes and get a response from the - intercept. This leverages the same approach as intercepting numeric ports when - using the mutating webhook injector, mainly requires the initContainer - to have NET_ADMIN capabilities. - docs: reference/intercepts/#intercepting-headless-services - - - type: change - title: Use one tunnel per connection instead of multiplexing into one tunnel - body: >- - We have changed Telepresence so that it uses one tunnel per connection instead - of multiplexing all connections into one tunnel. This will provide substantial - performance improvements. Clients will still be backwards compatible with older - managers that only support multiplexing. - - - type: bugfix - title: Added checks for Telepresence kubernetes compatibility - body: >- - Telepresence currently works with Kubernetes server versions 1.17.0 - and higher. We have added logs in the connector and traffic-manager - to let users know when they are using Telepresence with a cluster it doesn't support. - docs: reference/cluster-config - - - type: bugfix - title: Traffic Agent security context is now only added when necessary - body: >- - When creating an intercept, Telepresence will now only set the traffic agent's GID - when strictly necessary (i.e. when using headless services or numeric ports). This mitigates - an issue on openshift clusters where the traffic agent can fail to be created due to - openshift's security policies banning arbitrary GIDs. 
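As a quick illustration of the 2.4.5 log-gathering additions described in the notes above, the two new flags can be combined in a single invocation. This is only a sketch: the flag names come from the notes, while the output path is an assumed example rather than something taken from this file.

```shell
# Sketch only: combines the --get-pod-yaml and --anonymize flags described
# in the 2.4.5 notes above; the --output-file value is an assumed example.
telepresence gather-logs --get-pod-yaml --anonymize --output-file ./telepresence-logs.zip
```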
- - - version: 2.4.4 - date: "2021-09-27" - notes: - - type: feature - title: Numeric ports in agent injector - body: >- - The agent injector now supports injecting Traffic Agents into pods that have unnamed ports. - docs: reference/cluster-config/#note-on-numeric-ports - - - type: feature - title: New subcommand to gather logs and export into zip file - body: >- - Telepresence has logs for various components (the - traffic-manager, traffic-agents, the root and - user daemons), which are integral for understanding and debugging - Telepresence behavior. We have added the telepresence - gather-logs command to make it simple to compile logs for - all Telepresence components and export them in a zip file that can - be shared to others and/or included in a github issue. For more - information on usage, run telepresence gather-logs --help - . - docs: reference/client - image: telepresence-2.4.4-gather-logs.png - - - type: feature - title: Pod CIDR strategy is configurable in Helm chart - body: >- - Telepresence now enables you to directly configure how to get - pod CIDRs when deploying Telepresence with the Helm chart. - The default behavior remains the same. We've also introduced - the ability to explicitly set what the pod CIDRs should be. - docs: install/helm - - - type: bugfix - title: Compute pod CIDRs more efficiently - body: >- - When computing subnets using the pod CIDRs, the traffic-manager - now uses less CPU cycles. - docs: reference/routing/#subnets - - - type: bugfix - title: Prevent busy loop in traffic-manager - body: >- - In some circumstances, the traffic-manager's CPU - would max out and get pinned at its limit. This required a - shutdown or pod restart to fix. We've added some fixes - to prevent the traffic-manager from getting into this state. - - - type: bugfix - title: Added a fixed buffer size to TUN-device - body: >- - The TUN-device now has a max buffer size of 64K. This prevents the - buffer from growing limitlessly until it receies a PSH, which could - be a blocking operation when receiving lots of TCP-packets. - docs: reference/tun-device - - - type: bugfix - title: Fix hanging user daemon - body: >- - When Telepresence encountered an issue connecting to the cluster or - the root daemon, it could hang indefintely. It now will error correctly - when it encounters that situation. - - - type: bugfix - title: Improved proprietary agent connectivity - body: >- - To determine whether the environment cluster is air-gapped, the - proprietary agent attempts to connect to the cloud during startup. - To deal with a possible initial failure, the agent backs off - and retries the connection with an increasing backoff duration. - - - type: bugfix - title: Telepresence correctly reports intercept port conflict - body: >- - When creating a second intercept targetting the same local port, - it now gives the user an informative error message. Additionally, - it tells them which intercept is currently using that port to make - it easier to remedy. - - - version: 2.4.3 - date: "2021-09-15" - notes: - - type: feature - title: Environment variable TELEPRESENCE_INTERCEPT_ID available in interceptor's environment - body: >- - When you perform an intercept, we now include a TELEPRESENCE_INTERCEPT_ID environment - variable in the environment. - docs: reference/environment/#telepresence-environment-variables - - - type: bugfix - title: Improved daemon stability - body: >- - Fixed a timing bug that sometimes caused a "daemon did not start" failure. 
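For the TELEPRESENCE_INTERCEPT_ID variable mentioned in the 2.4.3 notes above, one way to observe it from an intercepted process might look like the following sketch; the workload name and port are made up for illustration.

```shell
# Hypothetical example: run a command in the intercept's environment and
# print the TELEPRESENCE_INTERCEPT_ID variable described in the 2.4.3 notes.
telepresence intercept example-service --port 8080 -- \
  sh -c 'echo "intercept id: $TELEPRESENCE_INTERCEPT_ID"'
```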
- - - type: bugfix - title: Complete logs for Windows - body: >- - Crash stack traces and other errors were incorrectly not written to log files. This has - been fixed so logs for Windows should be at parity with the ones in MacOS and Linux. - - - type: bugfix - title: Log rotation fix for Linux kernel 4.11+ - body: >- - On Linux kernel 4.11 and above, the log file rotation now properly reads the - birth-time of the log file. Older kernels continue to use the old behavior - of using the change-time in place of the birth-time. - - - type: bugfix - title: Improved error messaging - body: >- - When Telepresence encounters an error, it tells the user where they should look for - logs related to the error. We have refined this so that it only tells users to look - for errors in the daemon logs for issues that are logged there. - - - type: bugfix - title: Stop resolving localhost - body: >- - When using the overriding DNS resolver, it will no longer apply search paths when - resolving localhost, since that should be resolved on the user's machine - instead of the cluster. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Variable cluster domain - body: >- - Previously, the cluster domain was hardcoded to cluster.local. While this - is true for many kubernetes clusters, it is not for all of them. Now this value is - retrieved from the traffic-manager. - - - type: bugfix - title: Improved cleanup of traffic-agents - body: >- - Telepresence now uninstalls traffic-agents installed via mutating webhook - when using telepresence uninstall --everything. - - - type: bugfix - title: More large file transfer fixes - body: >- - Downloading large files during an intercept will no longer cause timeouts and hanging - traffic-agents. - - - type: bugfix - title: Setting --mount to false when intercepting works as expected - body: >- - When using --mount=false while performing an intercept, the file system - was still mounted. This has been remedied so the intercept behavior respects the - flag. - docs: reference/volume - - - type: bugfix - title: Traffic-manager establishes outbound connections in parallel - body: >- - Previously, the traffic-manager established outbound connections - sequentially. This resulted in slow (and failing) Dial calls would - block all outbound traffic from the workstation (for up to 30 seconds). We now - establish these connections in parallel so that won't occur. - docs: reference/routing/#outbound - - - type: bugfix - title: Status command reports correct DNS settings - body: >- - Telepresence status now correctly reports DNS settings for all operating - systems, instead of Local IP:nil, Remote IP:nil when they don't exist. - - - version: 2.4.2 - date: "2021-09-01" - notes: - - type: feature - title: New subcommand to temporarily change log-level - body: >- - We have added a new telepresence loglevel subcommand that enables users - to temporarily change the log-level for the local demons, the traffic-manager and - the traffic-agents. While the logLevels settings from the config will - still be used by default, this can be helpful if you are currently experiencing an issue and - want to have higher fidelity logs, without doing a telepresence quit and - telepresence connect. You can use telepresence loglevel --help to get - more information on options for the command. 
- docs: reference/config - - - type: change - title: All components have info as the default log-level - body: >- - We've now set the default for all components of Telepresence (traffic-agent, - traffic-manager, local daemons) to use info as the default log-level. - - - type: bugfix - title: Updating RBAC in helm chart to fix cluster-id regression - body: >- - In 2.4.1, we enabled the traffic-manager to get the cluster ID by getting the UID - of the default namespace. The helm chart was not updated to give the traffic-manager - those permissions, which has since been fixed. This impacted users who use licensed features of - the Telepresence extension in an air-gapped environment. - docs: reference/cluster-config/#air-gapped-cluster - - - type: bugfix - title: Timeouts for Helm actions are now respected - body: >- - The user-defined timeout for Helm actions wasn't always respected, causing the daemon to hang - indefinitely when failing to install the traffic-manager. - docs: reference/config#timeouts - - - version: 2.4.1 - date: "2021-08-30" - notes: - - type: feature - title: External cloud variables are now configurable - body: >- - We now support configuring the host and port for the cloud in your config.yml. These - are used when logging in to utilize features provided by an extension, and are also passed - along as environment variables when installing the `traffic-manager`. Additionally, we - now run our testsuite with these variables set to localhost to continue to ensure Telepresence - is fully fuctional without depeneding on an external service. The SYSTEMA_HOST and SYSTEMA_PORT - environment variables are no longer used. - image: telepresence-2.4.1-systema-vars.png - docs: reference/config/#cloud - - - type: feature - title: Helm chart can now regenerate certificate used for mutating webhook on-demand. - body: >- - You can now set agentInjector.certificate.regenerate when deploying Telepresence - with the Helm chart to automatically regenerate the certificate used by the agent injector webhook. - docs: install/helm - - - type: change - title: Traffic Manager installed via helm - body: >- - The traffic-manager is now installed via an embedded version of the Helm chart when telepresence connect is first performed on a cluster. - This change is transparent to the user. - A new configuration flag, timeouts.helm sets the timeouts for all helm operations performed by the Telepresence binary. - docs: reference/config#timeouts - - - type: change - title: traffic-manager gets cluster ID itself instead of via environment variable - body: >- - The traffic-manager used to get the cluster ID as an environment variable when running - telepresence connnect or via adding the value in the helm chart. This was - clunky so now the traffic-manager gets the value itself as long as it has permissions - to "get" and "list" namespaces (this has been updated in the helm chart). - docs: install/helm - - - type: bugfix - title: Telepresence now mounts all directories from /var/run/secrets - body: >- - In the past, we only mounted secret directories in /var/run/secrets/kubernetes.io. - We now mount *all* directories in /var/run/secrets, which, for example, includes - directories like eks.amazonaws.com used for IRSA tokens. - docs: reference/volume - - - type: bugfix - title: Max gRPC receive size correctly propagates to all grpc servers - body: >- - This fixes a bug where the max gRPC receive size was only propagated to some of the - grpc servers, causing failures when the message size was over the default. 
- docs: reference/config/#grpc - - - type: bugfix - title: Updated our Homebrew packaging to run manually - body: >- - We made some updates to our script that packages Telepresence for Homebrew so that it - can be run manually. This will enable maintainers of Telepresence to run the script manually - should we ever need to rollback a release and have latest point to an older verison. - docs: install/ - - - type: bugfix - title: Telepresence uses namespace from kubeconfig context on each call - body: >- - In the past, Telepresence would use whatever namespace was specified in the kubeconfig's current-context - for the entirety of the time a user was connected to Telepresence. This would lead to confusing behavior - when a user changed the context in their kubeconfig and expected Telepresence to acknowledge that change. - Telepresence now will do that and use the namespace designated by the context on each call. - - - type: bugfix - title: Idle outbound TCP connections timeout increased to 7200 seconds - body: >- - Some users were noticing that their intercepts would start failing after 60 seconds. - This was because the keep idle outbound TCP connections were set to 60 seconds, which we have - now bumped to 7200 seconds to match Linux's tcp_keepalive_time default. - - - type: bugfix - title: Telepresence will automatically remove a socket upon ungraceful termination - body: >- - When a Telepresence process terminates ungracefully, it would inform users that "this usually means - that the process has terminated ungracefully" and implied that they should remove the socket. We've - now made it so Telepresence will automatically attempt to remove the socket upon ungraceful termination. - - - type: bugfix - title: Fixed user daemon deadlock - body: >- - Remedied a situation where the user daemon could hang when a user was logged in. - - - type: bugfix - title: Fixed agentImage config setting - body: >- - The config setting images.agentImages is no longer required to contain the repository, and it - will use the value at images.repository. - docs: reference/config/#images - - - version: 2.4.0 - date: "2021-08-04" - notes: - - type: feature - title: Windows Client Developer Preview - body: >- - There is now a native Windows client for Telepresence that is being released as a Developer Preview. - All the same features supported by the MacOS and Linux client are available on Windows. - image: telepresence-2.4.0-windows.png - docs: install - - - type: feature - title: CLI raises helpful messages from Ambassador Cloud - body: >- - Telepresence can now receive messages from Ambassador Cloud and raise - them to the user when they perform certain commands. This enables us - to send you messages that may enhance your Telepresence experience when - using certain commands. Frequency of messages can be configured in your - config.yml. - image: telepresence-2.4.0-cloud-messages.png - docs: reference/config#cloud - - - type: bugfix - title: Improved stability of systemd-resolved-based DNS - body: >- - When initializing the systemd-resolved-based DNS, the routing domain - is set to improve stability in non-standard configurations. This also enables the - overriding resolver to do a proper take over once the DNS service ends. 
- docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Fixed an edge case when intercepting a container with multiple ports - body: >- - When specifying a port of a container to intercept, if there was a container in the - pod without ports, it was automatically selected. This has been fixed so we'll only - choose the container with "no ports" if there's no container that explicitly matches - the port used in your intercept. - docs: reference/intercepts/#creating-an-intercept-when-a-service-has-multiple-ports - - - type: bugfix - title: $(NAME) references in agent's environments are now interpolated correctly. - body: >- - If you had an environment variable $(NAME) in your workload that referenced another, intercepts - would not correctly interpolate $(NAME). This has been fixed and works automatically. - - - type: bugfix - title: Telepresence no longer prints INFO message when there is no config.yml - body: >- - Fixed a regression that printed an INFO message to the terminal when there wasn't a - config.yml present. The config is optional, so this message has been - removed. - docs: reference/config - - - type: bugfix - title: Telepresence no longer panics when using --http-match - body: >- - Fixed a bug where Telepresence would panic if the value passed to --http-match - didn't contain an equal sign, which has been fixed. The correct syntax is in the --help - string and looks like --http-match=HTTP2_HEADER=REGEX - docs: reference/intercepts/#intercept-behavior-when-logged-in-to-ambassador-cloud - - - type: bugfix - title: Improved subnet updates - body: >- - The `traffic-manager` used to update subnets whenever the `Nodes` or `Pods` changed, even if - the underlying subnet hadn't changed, which created a lot of unnecessary traffic between the - client and the `traffic-manager`. This has been fixed so we only send updates when the subnets - themselves actually change. - docs: reference/routing/#subnets - - - version: 2.3.7 - date: "2021-07-23" - notes: - - type: feature - title: Also-proxy in telepresence status - body: >- - An also-proxy entry in the Kubernetes cluster config will - show up in the output of the telepresence status command. - docs: reference/config - - - type: feature - title: Non-interactive telepresence login - body: >- - telepresence login now has an - --apikey=KEY flag that allows for - non-interactive logins. This is useful for headless - environments where launching a web-browser is impossible, - such as cloud shells, Docker containers, or CI. - image: telepresence-2.3.7-newkey.png - docs: reference/client/login/ - - - type: bugfix - title: Mutating webhook injector correctly hides named ports for probes. - body: >- - The mutating webhook injector has been fixed to correctly rename named ports for liveness and readiness probes - docs: reference/cluster-config - - - type: bugfix - title: telepresence current-cluster-id crash fixed - body: >- - Fixed a regression introduced in 2.3.5 that caused `telepresence current-cluster-id` - to crash. - docs: reference/cluster-config - - - type: bugfix - title: Better UX around intercepts with no local process running - body: >- - Requests would hang indefinitely when initiating an intercept before you - had a local process running. This has been fixed and will result in an - Empty reply from server until you start a local process. 
- docs: reference/intercepts - - - type: bugfix - title: API keys no longer show as "no description" - body: >- - New API keys generated internally for communication with - Ambassador Cloud no longer show up as "no description" in - the Ambassador Cloud web UI. Existing API keys generated by - older versions of Telepresence will still show up this way. - image: telepresence-2.3.7-keydesc.png - - - type: bugfix - title: Fix corruption of user-info.json - body: >- - Fixed a race condition that logging in and logging out - rapidly could cause memory corruption or corruption of the - user-info.json cache file used when - authenticating with Ambassador Cloud. - - - type: bugfix - title: Improved DNS resolver for systemd-resolved - body: - Telepresence's systemd-resolved-based DNS resolver is now more - stable and in case it fails to initialize, the overriding resolver - will no longer cause general DNS lookup failures when telepresence defaults to - using it. - docs: reference/routing#linux-systemd-resolved-resolver - - - type: bugfix - title: Faster telepresence list command - body: - The performance of telepresence list has been increased - significantly by reducing the number of calls the command makes to the cluster. - docs: reference/client - - - version: 2.3.6 - date: "2021-07-20" - notes: - - type: bugfix - title: Fix preview URLs - body: >- - Fixed a regression introduced in 2.3.5 that caused preview - URLs to not work. - - - type: bugfix - title: Fix subnet discovery - body: >- - Fixed a regression introduced in 2.3.5 where the Traffic - Manager's RoleBinding did not correctly appoint - the traffic-manager Role, causing - subnet discovery to not be able to work correctly. - docs: reference/rbac/ - - - type: bugfix - title: Fix root-user configuration loading - body: >- - Fixed a regression introduced in 2.3.5 where the root daemon - did not correctly read the configuration file; ignoring the - user's configured log levels and timeouts. - docs: reference/config/ - - - type: bugfix - title: Fix a user daemon crash - body: >- - Fixed an issue that could cause the user daemon to crash - during shutdown, as during shutdown it unconditionally - attempted to close a channel even though the channel might - already be closed. - - - version: 2.3.5 - date: "2021-07-15" - notes: - - type: feature - title: traffic-manager in multiple namespaces - body: >- - We now support installing multiple traffic managers in the same cluster. - This will allow operators to install deployments of telepresence that are - limited to certain namespaces. - image: ./telepresence-2.3.5-traffic-manager-namespaces.png - docs: install/helm - - type: feature - title: No more dependence on kubectl - body: >- - Telepresence no longer depends on having an external - kubectl binary, which might not be present for - OpenShift users (who have oc instead of - kubectl). - - type: feature - title: Agent image now configurable - body: >- - We now support configuring which agent image + registry to use in the - config. This enables users whose laptop is an air-gapped environment to - create personal intercepts without requiring a login. It also makes it easier - for those who are developing on Telepresence to specify which agent image should - be used. Env vars TELEPRESENCE_AGENT_IMAGE and TELEPRESENCE_REGISTRY are no longer - used. 
- image: ./telepresence-2.3.5-agent-config.png - docs: reference/config/#images - - type: feature - title: Max gRPC receive size now configurable - body: >- - The default max size of messages received through gRPC (4 MB) is sometimes insufficient. It can now be configured. - image: ./telepresence-2.3.5-grpc-max-receive-size.png - docs: reference/config/#grpc - - type: feature - title: CLI can be used in air-gapped environments - body: >- - While Telepresence will auto-detect if your cluster is in an air-gapped environment, - we've added an option users can add to their config.yml to ensure the cli acts like it - is in an air-gapped environment. Air-gapped environments require a manually installed - licence. - docs: reference/cluster-config/#air-gapped-cluster - image: ./telepresence-2.3.5-skipLogin.png - - version: 2.3.4 - date: "2021-07-09" - notes: - - type: bugfix - title: Improved IP log statements - body: >- - Some log statements were printing incorrect characters, when they should have been IP addresses. - This has been resolved to include more accurate and useful logging. - docs: reference/config/#log-levels - image: ./telepresence-2.3.4-ip-error.png - - type: bugfix - title: Improved messaging when multiple services match a workload - body: >- - If multiple services matched a workload when performing an intercept, Telepresence would crash. - It now gives the correct error message, instructing the user on how to specify which - service the intercept should use. - image: ./telepresence-2.3.4-improved-error.png - docs: reference/intercepts - - type: bugfix - title: Traffic-manger creates services in its own namespace to determine subnet - body: >- - Telepresence will now determine the service subnet by creating a dummy-service in its own - namespace, instead of the default namespace, which was causing RBAC permissions issues in - some clusters. - docs: reference/routing/#subnets - - type: bugfix - title: Telepresence connect respects pre-existing clusterrole - body: >- - When Telepresence connects, if the traffic-manager's desired clusterrole already exists in the - cluster, Telepresence will no longer try to update the clusterrole. - docs: reference/rbac - - type: bugfix - title: Helm Chart fixed for clientRbac.namespaced - body: >- - The Telepresence Helm chart no longer fails when installing with --set clientRbac.namespaced=true. - docs: install/helm - - version: 2.3.3 - date: "2021-07-07" - notes: - - type: feature - title: Traffic Manager Helm Chart - body: >- - Telepresence now supports installing the Traffic Manager via Helm. - This will make it easy for operators to install and configure the - server-side components of Telepresence separately from the CLI (which - in turn allows for better separation of permissions). - image: ./telepresence-2.3.3-helm.png - docs: install/helm/ - - type: feature - title: Traffic-manager in custom namespace - body: >- - As the traffic-manager can now be installed in any - namespace via Helm, Telepresence can now be configured to look for the - Traffic Manager in a namespace other than ambassador. - This can be configured on a per-cluster basis. - image: ./telepresence-2.3.3-namespace-config.png - docs: reference/config - - type: feature - title: Intercept --to-pod - body: >- - telepresence intercept now supports a - --to-pod flag that can be used to port-forward sidecars' - ports from an intercepted pod. 
- image: ./telepresence-2.3.3-to-pod.png - docs: reference/intercepts - - type: change - title: Change in migration from edgectl - body: >- - Telepresence no longer automatically shuts down the old - api_version=1 edgectl daemon. If migrating - from such an old version of edgectl you must now manually - shut down the edgectl daemon before running Telepresence. - This was already the case when migrating from the newer - api_version=2 edgectl. - - type: bugfix - title: Fixed error during shutdown - body: >- - The root daemon no longer terminates when the user daemon disconnects - from its gRPC streams, and instead waits to be terminated by the CLI. - This could cause problems with things not being cleaned up correctly. - - type: bugfix - title: Intercepts will survive deletion of intercepted pod - body: >- - An intercept will survive deletion of the intercepted pod provided - that another pod is created (or already exists) that can take over. - - version: 2.3.2 - date: "2021-06-18" - notes: - # Headliners - - type: feature - title: Service Port Annotation - body: >- - The mutator webhook for injecting traffic-agents now - recognizes a - telepresence.getambassador.io/inject-service-port - annotation to specify which port to intercept; bringing the - functionality of the --port flag to users who - use the mutator webook in order to control Telepresence via - GitOps. - image: ./telepresence-2.3.2-svcport-annotation.png - docs: reference/cluster-config#service-port-annotation - - type: feature - title: Outbound Connections - body: >- - Outbound connections are now routed through the intercepted - Pods which means that the connections originate from that - Pod from the cluster's perspective. This allows service - meshes to correctly identify the traffic. - docs: reference/routing/#outbound - - type: change - title: Inbound Connections - body: >- - Inbound connections from an intercepted agent are now - tunneled to the manager over the existing gRPC connection, - instead of establishing a new connection to the manager for - each inbound connection. This avoids interference from - certain service mesh configurations. - docs: reference/routing/#inbound - - # RBAC changes - - type: change - title: Traffic Manager needs new RBAC permissions - body: >- - The Traffic Manager requires RBAC - permissions to list Nodes, Pods, and to create a dummy - Service in the manager's namespace. - docs: reference/routing/#subnets - - type: change - title: Reduced developer RBAC requirements - body: >- - The on-laptop client no longer requires RBAC permissions to list the Nodes - in the cluster or to create Services, as that functionality - has been moved to the Traffic Manager. - - # Bugfixes - - type: bugfix - title: Able to detect subnets - body: >- - Telepresence will now detect the Pod CIDR ranges even if - they are not listed in the Nodes. - image: ./telepresence-2.3.2-subnets.png - docs: reference/routing/#subnets - - type: bugfix - title: Dynamic IP ranges - body: >- - The list of cluster subnets that the virtual network - interface will route is now configured dynamically and will - follow changes in the cluster. - - type: bugfix - title: No duplicate subnets - body: >- - Subnets fully covered by other subnets are now pruned - internally and thus never superfluously added to the - laptop's routing table. 
- docs: reference/routing/#subnets - - type: change # not a bugfix, but it only makes sense to mention after the above bugfixes - title: Change in default timeout - body: >- - The trafficManagerAPI timeout default has - changed from 5 seconds to 15 seconds, in order to facilitate - the extended time it takes for the traffic-manager to do its - initial discovery of cluster info as a result of the above - bugfixes. - - type: bugfix - title: Removal of DNS config files on macOS - body: >- - On macOS, files generated under - /etc/resolver/ as the result of using - include-suffixes in the cluster config are now - properly removed on quit. - docs: reference/routing/#macos-resolver - - - type: bugfix - title: Large file transfers - body: >- - Telepresence no longer erroneously terminates connections - early when sending a large HTTP response from an intercepted - service. - - type: bugfix - title: Race condition in shutdown - body: >- - When shutting down the user-daemon or root-daemon on the - laptop, telepresence quit and related commands - no longer return early before everything is fully shut down. - Now it can be counted on that by the time the command has - returned that all of the side-effects on the laptop have - been cleaned up. - - version: 2.3.1 - date: "2021-06-14" - notes: - - title: DNS Resolver Configuration - body: "Telepresence now supports per-cluster configuration for custom dns behavior, which will enable users to determine which local + remote resolver to use and which suffixes should be ignored + included. These can be configured on a per-cluster basis." - image: ./telepresence-2.3.1-dns.png - docs: reference/config - type: feature - - title: AlsoProxy Configuration - body: "Telepresence now supports also proxying user-specified subnets so that they can access external services only accessible to the cluster while connected to Telepresence. These can be configured on a per-cluster basis and each subnet is added to the TUN device so that requests are routed to the cluster for IPs that fall within that subnet." - image: ./telepresence-2.3.1-alsoProxy.png - docs: reference/config - type: feature - - title: Mutating Webhook for Injecting Traffic Agents - body: "The Traffic Manager now contains a mutating webhook to automatically add an agent to pods that have the telepresence.getambassador.io/traffic-agent: enabled annotation. This enables Telepresence to work well with GitOps CD platforms that rely on higher level kubernetes objects matching what is stored in git. For workloads without the annotation, Telepresence will add the agent the way it has in the past" - image: ./telepresence-2.3.1-inject.png - docs: reference/rbac - type: feature - - title: Traffic Manager Connect Timeout - body: "The trafficManagerConnect timeout default has changed from 20 seconds to 60 seconds, in order to facilitate the extended time it takes to apply everything needed for the mutator webhook." - image: ./telepresence-2.3.1-trafficmanagerconnect.png - docs: reference/config - type: change - - title: Fix for large file transfers - body: "Fix a tun-device bug where sometimes large transfers from services on the cluster would hang indefinitely" - image: ./telepresence-2.3.1-large-file-transfer.png - docs: reference/tun-device - type: bugfix - - title: Brew Formula Changed - body: "Now that the Telepresence rewrite is the main version of Telepresence, you can install it via Brew like so: brew install datawire/blackbird/telepresence." 
- image: ./telepresence-2.3.1-brew.png - docs: install/ - type: change - - version: 2.3.0 - date: "2021-06-01" - notes: - - title: Brew install Telepresence - body: "Telepresence can now be installed via brew on macOS, which makes it easier for users to stay up-to-date with the latest telepresence version. To install via brew, you can use the following command: brew install datawire/blackbird/telepresence2." - image: ./telepresence-2.3.0-homebrew.png - docs: install/ - type: feature - - title: TCP and UDP routing via Virtual Network Interface - body: "Telepresence will now perform routing of outbound TCP and UDP traffic via a Virtual Network Interface (VIF). The VIF is a layer 3 TUN-device that exists while Telepresence is connected. It makes the subnets in the cluster available to the workstation and will also route DNS requests to the cluster and forward them to intercepted pods. This means that pods with custom DNS configuration will work as expected. Prior versions of Telepresence would use firewall rules and were only capable of routing TCP." - image: ./tunnel.jpg - docs: reference/tun-device - type: feature - - title: SSH is no longer used - body: "All traffic between the client and the cluster is now tunneled via the traffic manager gRPC API. This means that Telepresence no longer uses ssh tunnels and that the manager no longer have an sshd installed. Volume mounts are still established using sshfs but it is now configured to communicate using the sftp-protocol directly, which means that the traffic agent also runs without sshd. A desired side effect of this is that the manager and agent containers no longer need a special user configuration." - image: ./no-ssh.png - docs: reference/tun-device/#no-ssh-required - type: change - - title: Running in a Docker container - body: "Telepresence can now be run inside a Docker container. This can be useful for avoiding side effects on a workstation's network, establishing multiple sessions with the traffic manager, or working with different clusters simultaneously." - image: ./run-tp-in-docker.png - docs: reference/inside-container - type: feature - - title: Configurable Log Levels - body: "Telepresence now supports configuring the log level for Root Daemon and User Daemon logs. This provides control over the nature and volume of information that Telepresence generates in daemon.log and connector.log." - image: ./telepresence-2.3.0-loglevels.png - docs: reference/config/#log-levels - type: feature - - version: 2.2.2 - date: "2021-05-17" - notes: - - title: Legacy Telepresence subcommands - body: Telepresence is now able to translate common legacy Telepresence commands into native Telepresence commands. So if you want to get started quickly, you can just use the same legacy Telepresence commands you are used to with the new Telepresence binary. - image: ./telepresence-2.2.png - docs: install/migrate-from-legacy/ - type: feature diff --git a/docs/v2.5/troubleshooting/index.md b/docs/v2.5/troubleshooting/index.md deleted file mode 100644 index 21ff5405..00000000 --- a/docs/v2.5/troubleshooting/index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -description: "Troubleshooting issues related to Telepresence." ---- -# Troubleshooting - -## Creating an intercept did not generate a preview URL - -Preview URLs can only be created if Telepresence is [logged in to -Ambassador Cloud](../reference/client/login/). 
When not logged in, it -will not even try to create a preview URL (additionally, by default it -will intercept all traffic rather than just a subset of the traffic). -Remove the intercept with `telepresence leave [deployment name]`, run -`telepresence login` to login to Ambassador Cloud, then recreate the -intercept. See the [intercepts how-to doc](../howtos/intercepts) for -more details. - -## Error on accessing preview URL: `First record does not look like a TLS handshake` - -The service you are intercepting is likely not using TLS, however when configuring the intercept you indicated that it does use TLS. Remove the intercept with `telepresence leave [deployment name]` and recreate it, setting `TLS` to `n`. Telepresence tries to intelligently determine these settings for you when creating an intercept and offer them as defaults, but odd service configurations might cause it to suggest the wrong settings. - -## Error on accessing preview URL: Detected a 301 Redirect Loop - -If your ingress is set to redirect HTTP requests to HTTPS and your web app uses HTTPS, but you configure the intercept to not use TLS, you will get this error when opening the preview URL. Remove the intercept with `telepresence leave [deployment name]` and recreate it, selecting the correct port and setting `TLS` to `y` when prompted. - -## Connecting to a cluster via VPN doesn't work. - -There are a few different issues that could arise when working with a VPN. Please see the [dedicated page](../reference/vpn) on Telepresence and VPNs to learn more on how to fix these. - -## Your GitHub organization isn't listed - -Ambassador Cloud needs access granted to your GitHub organization as a -third-party OAuth app. If an organization isn't listed during login -then the correct access has not been granted. - -The quickest way to resolve this is to go to the **Github menu** → -**Settings** → **Applications** → **Authorized OAuth Apps** → -**Ambassador Labs**. An organization owner will have a **Grant** -button, anyone not an owner will have **Request** which sends an email -to the owner. If an access request has been denied in the past the -user will not see the **Request** button, they will have to reach out -to the owner. - -Once access is granted, log out of Ambassador Cloud and log back in; -you should see the GitHub organization listed. - -The organization owner can go to the **GitHub menu** → **Your -organizations** → **[org name]** → **Settings** → **Third-party -access** to see if Ambassador Labs has access already or authorize a -request for access (only owners will see **Settings** on the -organization page). Clicking the pencil icon will show the -permissions that were granted. - -GitHub's documentation provides more detail about [managing access granted to third-party applications](https://docs.github.com/en/github/authenticating-to-github/connecting-with-third-party-applications) and [approving access to apps](https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/approving-oauth-apps-for-your-organization). - -### Granting or requesting access on initial login - -When using GitHub as your identity provider, the first time you log in -to Ambassador Cloud GitHub will ask to authorize Ambassador Labs to -access your organizations and certain user data. - - - -Any listed organization with a green check has already granted access -to Ambassador Labs (you still need to authorize to allow Ambassador -Labs to read your user data and organization membership). 
- -Any organization with a red "X" requires access to be granted to -Ambassador Labs. Owners of the organization will see a **Grant** -button. Anyone who is not an owner will see a **Request** button. -This will send an email to the organization owner requesting approval -to access the organization. If an access request has been denied in -the past the user will not see the **Request** button, they will have -to reach out to the owner. - -Once approval is granted, you will have to log out of Ambassador Cloud -then back in to select the organization. - -### Volume mounts are not working on macOS - -It's necessary to have `sshfs` installed in order for volume mounts to work correctly during intercepts. Lately there's been some issues using `brew install sshfs` a macOS workstation because the required component `osxfuse` (now named `macfuse`) isn't open source and hence, no longer supported. As a workaround, you can now use `gromgit/fuse/sshfs-mac` instead. Follow these steps: - -1. Remove old sshfs, macfuse, osxfuse using `brew uninstall` -2. `brew install --cask macfuse` -3. `brew install gromgit/fuse/sshfs-mac` -4. `brew link --overwrite sshfs-mac` - -Now sshfs -V shows you the correct version, e.g.: -``` -$ sshfs -V -SSHFS version 2.10 -FUSE library version: 2.9.9 -fuse: no mount point -``` - -but one more thing must be done before it works OK: -5. Try a mount (or an intercept that performs a mount). It will fail because you need to give permission to “Benjamin Fleischer” to execute a kernel extension (a pop-up appears that takes you to the system preferences). -6. Approve the needed permission -7. Reboot your computer. diff --git a/docs/v2.5/tutorial.md b/docs/v2.5/tutorial.md deleted file mode 100644 index 77b55591..00000000 --- a/docs/v2.5/tutorial.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -description: "Install Telepresence and learn to use it to intercept services running in your Kubernetes cluster, speeding up local development and debugging." ---- - -import Alert from '@material-ui/lab/Alert'; - -# Telepresence Quick Start - -In this guide you will explore some of the key features of Telepresence. First, you will install the Telepresence CLI and set up a test cluster with a demo web app. Then, you will run one of the app's services on your laptop, using Telepresence to intercept requests to the service on the cluster and see your changes live via a preview URL. - -## Prerequisites - -It is recommended to use an empty development cluster for this guide. You must have access via RBAC to create and update deployments and services in the cluster. You must also have [Node.js installed](https://nodejs.org/en/download/package-manager/) on your laptop to run the demo app code. - -Finally, you will need the Telepresence CLI. Run the commands for -your OS to install it and log in to Ambassador Cloud in your browser. -Follow the prompts to log in with GitHub then select your -organization. You will be redirected to the Ambassador Cloud -dashboard; later you will manage your preview URLs here. - -### macOS - -```shell -# Intel Macs - -# Install via brew: -brew install datawire/blackbird/telepresence - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. 
Login with the CLI: -telepresence login - -# Apple silicon Macs - -# Install via brew: -brew install datawire/blackbird/telepresence-arm64 - -# OR Install manually: -# 1. Download the latest binary (~60 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/darwin/arm64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you receive an error saying the developer cannot be verified, open System Preferences → Security & Privacy → General. Click Open Anyway at the bottom to bypass the security block. Then retry the telepresence login command. - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -### Linux - -```shell -# 1. Download the latest binary (~50 MB): -sudo curl -fL https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence \ --o /usr/local/bin/telepresence - -# 2. Make the binary executable: -sudo chmod a+x /usr/local/bin/telepresence - -# 3. Login with the CLI: -telepresence login -``` - -If you are in an environment where Telepresence cannot launch a local -browser for you to interact with, you will need to pass the -[`--apikey` flag to `telepresence -login`](../../reference/client/login/). - -## Cluster Setup - -1. You will use a sample Java app for this guide. Later, after deploying the app into your cluster, we will review its architecture. Start by cloning the repo: - - ``` - git clone https://github.com/datawire/amb-code-quickstart-app.git - ``` - -2. Install [Edge Stack](../../../../../../products/edge-stack/) to use as an ingress controller for your cluster. We need an ingress controller to allow access to the web app from the internet. - - Change into the repo directory, then into `k8s-config`, and apply the YAML files to deploy Edge Stack. - - ``` - cd amb-code-quickstart-app/k8s-config - kubectl apply -f 1-aes-crds.yml && kubectl wait --for condition=established --timeout=90s crd -lproduct=aes - kubectl apply -f 2-aes.yml && kubectl wait -n ambassador deploy -lproduct=aes --for condition=available --timeout=90s - ``` - -3. Install the web app by applying its manifest: - - ``` - kubectl apply -f edgy-corp-web-app.yaml - ``` - -4. Wait a few moments for the external load balancer to become available, then retrieve its IP address: - - ``` - kubectl get service -n ambassador ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}' - ``` - - - - - - -
  1. Wait until all the pods start, then access the Edgy Corp web app in your browser at http://<load-balancer-ip/>. Be sure you use http, not https!
    You should see the landing page for the web app with an architecture diagram. The web app is composed of three services, with the frontend VeryLargeJavaService dependent on the two backend services.
- -## Developing with Telepresence - -Now that your app is all wired up you're ready to start doing development work with Telepresence. Imagine you are a Java developer and first on your to-do list for the day is a change on the `DataProcessingNodeService`. One thing this service does is set the color for the title and a pod in the diagram. The production version of the app on the cluster uses green elements, but you want to see a version with these elements set to blue. - -The `DataProcessingNodeService` service is dependent on the `VeryLargeJavaService` and `VeryLargeDataStore` services to run. Local development would require one of the two following setups, neither of which is ideal. - -First, you could run the two dependent services on your laptop. However, as their names suggest, they are too large to run locally. This option also doesn't scale well. Two services isn't a lot to manage, but more complex apps requiring many more dependencies is not feasible to manage running on your laptop. - -Second, you could run everything in a development cluster. However, the cycle of writing code then waiting on containers to build and deploy is incredibly disruptive. The lengthening of the [inner dev loop](../concepts/devloop) in this way can have a significant impact on developer productivity. - -## Intercepting a Service - -Alternatively, you can use Telepresence's `intercept` command to proxy traffic bound for a service to your laptop. This will let you test and debug services on code running locally without needing to run dependent services or redeploy code updates to your cluster on every change. It also will generate a preview URL, which loads your web app from the cluster ingress but with requests to the intercepted service proxied to your laptop. - -1. You started this guide by installing the Telepresence CLI and - logging in to Ambassador Cloud. The Cloud dashboard is used to - manage your intercepts and share them with colleagues. You must be - logged in to create personal intercepts as we are going to do here. - - Run telepresence dashboard if you are already logged in and just need to reopen the dashboard. - -2. In your terminal and run `telepresence list`. This will connect to your cluster, install the [Traffic Manager](../reference/#architecture) to proxy the traffic, and return a list of services that Telepresence is able to intercept. - -3. Navigate up one directory to the root of the repo then into `DataProcessingNodeService`. Install the Node.js dependencies and start the app passing the `blue` argument, which is used by the app to set the title and pod color in the diagram you saw earlier. - - ``` - cd ../DataProcessingNodeService - npm install - node app -c blue - ``` - -4. In a new terminal window start the intercept with the command below. This will proxy requests to the `DataProcessingNodeService` service to your laptop. It will also generate a preview URL, which will let you view the app with the intercepted service in your browser. - - The intercept requires you specify the name of the deployment to be intercepted and the port to proxy. - - ``` - telepresence intercept dataprocessingnodeservice --port 3000 - ``` - - You will be prompted with a few options. Telepresence tries to intelligently determine the deployment and namespace of your ingress controller. Hit `enter` to accept the default value of `ambassador.ambassador` for `Ingress`. 
For simplicity's sake, our app uses 80 for the port and does *not* use TLS, so use those options when prompted for the `port` and `TLS` settings. Your output should be similar to this: - - ``` - $ telepresence intercept dataprocessingnodeservice --port 3000 - To create a preview URL, telepresence needs to know how cluster - ingress works for this service. Please Select the ingress to use. - - 1/4: What's your ingress' layer 3 (IP) address? - You may use an IP address or a DNS name (this is usually a - "service.namespace" DNS name). - - [no default]: verylargejavaservice.default - - 2/4: What's your ingress' layer 4 address (TCP port number)? - - [no default]: 8080 - - 3/4: Does that TCP port on your ingress use TLS (as opposed to cleartext)? - - [default: n]: - - 4/4: If required by your ingress, specify a different layer 5 hostname - (TLS-SNI, HTTP "Host" header) to access this service. - - [default: verylargejavaservice.default]: - - Using Deployment dataprocessingservice - intercepted - Intercept name : dataprocessingservice - State : ACTIVE - Workload kind : Deployment - Destination : 127.0.0.1:3000 - Intercepting : HTTP requests that match all of: - header("x-telepresence-intercept-id") ~= regexp("86cb4a70-c7e1-1138-89c2-d8fed7a46cae:dataprocessingservice") - Preview URL : https://.preview.edgestack.me - Layer 5 Hostname: verylargejavaservice.default - ``` - - - - - - -
  1. Open the preview URL in your browser to see the intercepted version of the app. The Node server on your laptop replies back to the cluster with the blue option enabled; you will see a blue title and blue pod in the diagram. Remember that previously these elements were green.
    You will also see a banner at the bottom of the page informing you that you are viewing a preview URL with your name and org name.
- - - - - - -
  1. Switch back to the dashboard page in your browser and refresh it to see your preview URL listed. Click the box to expand it and see options to disable authentication for the preview or remove it.
    If there were other developers in your organization also creating preview URLs, you would see them here as well.
- -This diagram demonstrates the flow of requests using the intercept. The laptop on the left visits the preview URL, the request is redirected to the cluster ingress, and requests to and from the `DataProcessingNodeService` by other pods are proxied to the developer laptop running Telepresence. - -![Intercept Architecture](../../images/tp-tutorial-4.png) - -7. Clean up your environment by first typing `Ctrl+C` in the terminal running Node. Then stop the intercept with the `leave` command and `quit` to stop the daemon. Finally, use `uninstall --everything` to remove the Traffic Manager and Agents from your cluster. - - ``` - telepresence leave dataprocessingnodeservice - telepresence quit - telepresence uninstall --everything - ``` - -8. Refresh the dashboard page again and you will see the intercept was removed after running the `leave` command. Refresh the browser tab with the preview URL and you will see that it has been disabled. - -## What's Next? - -Telepresence and preview URLS open up powerful possibilities for [collaborating](../howtos/preview-urls) with your colleagues and others outside of your organization. - -Learn more about how Telepresence handles [outbound sessions](../howtos/outbound), allowing locally running services to interact with cluster services without an intercept. - -Read the [FAQs](../faqs) to learn more about uses cases and the technical implementation of Telepresence. diff --git a/docs/v2.5/versions.yml b/docs/v2.5/versions.yml deleted file mode 100644 index a8bf8c43..00000000 --- a/docs/v2.5/versions.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: "2.5.8" -dlVersion: "latest" -docsVersion: "2.5" -branch: release/v2 -productName: "Telepresence" diff --git a/gatsby-config.js b/gatsby-config.js index 5f3394fd..27e44d25 100644 --- a/gatsby-config.js +++ b/gatsby-config.js @@ -18,7 +18,6 @@ module.exports = { siteURL: "https://www.telepresence.io" }, plugins: [ - // We have a bunch of documentation subtree'd in at ./docs/ { resolve: 'gatsby-source-filesystem', @@ -32,14 +31,6 @@ module.exports = { resolve: 'gatsby-plugin-mdx', options: { extensions: [`.md`], - gatsbyRemarkPlugins: [ - { - resolve: 'gatsby-remark-autolink-headers', - options: { - enableCustomId: true, - }, - }, - ], }, }, { @@ -62,9 +53,6 @@ module.exports = { path: `${__dirname}/src/frontmatter-schema/`, }, }, - { - resolve: 'gatsby-remark-reading-time', - }, // We need this in order to set things in in the HTML. Otherwise it // gets set by client-side in the DOM after page-load. { diff --git a/gatsby-node.js b/gatsby-node.js index cfef8dbc..8ad3b79a 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -13,10 +13,9 @@ exports.onCreateWebpackConfig = ({ actions }) => { alias: { "@src": path.resolve(__dirname, "src"), }, - }, - // https://github.com/gatsbyjs/gatsby/issues/564 - node: { - fs: 'empty', + fallback: { + fs: false, + }, }, }); }; @@ -33,24 +32,6 @@ exports.onCreateNode = async ({ node, loadNodeContent, actions }) => { } }; -// resolvePathToID takes a filepath, and resolves it to a node ID. -async function resolvePathToID(helpers, sourceInstanceName, relativePath) { - let result = await helpers.graphql(` - query($sourceInstanceName: String!, $relativePath: String!) { - file( - sourceInstanceName: { eq: $sourceInstanceName }, - relativePath: {eq: $relativePath }, - ) { - id - } - } - `, { - sourceInstanceName: sourceInstanceName, - relativePath: relativePath, - }); - return result.data.file?.id; -} - // Tell Gatsby to create web pages for each of the docs markdown files. 
exports.createPages = async ({ graphql, actions }) => { const docsConfig = require('./docs-config'); @@ -60,7 +41,7 @@ exports.createPages = async ({ graphql, actions }) => { query($sourceInstanceName: String!) { pageFiles: allFile(filter: { sourceInstanceName: { eq: $sourceInstanceName }, - base: { regex: "/^(.*[.]md|releaseNotes[.]yml)$/" }, + base: { regex: "/^.*[.]md$/" }, }) { edges { node { @@ -102,64 +83,62 @@ exports.createPages = async ({ graphql, actions }) => { for (const { node } of result.data.pageFiles.edges) { allURLPaths.add(docsConfig.urlpath(node)); } + const basepath = path.posix.sep; // ...and finally generate HTML pages for them. let variablesCache = {}; let sidebarCache = {}; for (const { node } of result.data.pageFiles.edges) { + const urlPath = docsConfig.urlpath(node); + const nodePath = urlPath.replaceAll(path.posix.sep, path.sep) const variablesFilepath = docsConfig.variablesFilepath(node); if (!(variablesFilepath in variablesCache)) { - variablesCache[variablesFilepath] = await resolvePathToID({ graphql }, docsConfig.sourceInstanceName, variablesFilepath); + const fp = path.join(docsConfig.sourceInstanceName, variablesFilepath) + if (fs.existsSync(fp)) { + variablesCache[variablesFilepath] = jsYAML.load(fs.readFileSync(fp)); + } } const sidebarFilepath = docsConfig.sidebarFilepath(node); if (!(sidebarFilepath in sidebarCache)) { - sidebarCache[sidebarFilepath] = await resolvePathToID({ graphql }, docsConfig.sourceInstanceName, sidebarFilepath); + const fp = path.join(docsConfig.sourceInstanceName, sidebarFilepath) + if (fs.existsSync(fp)) { + sidebarCache[sidebarFilepath] = jsYAML.load(fs.readFileSync(fp)); + } } - const urlpath = docsConfig.urlpath(node); + actions.createPage({ + // URL-path to create the page at + path: urlPath, + // Absolute filepath of the component to render the page with + component: path.resolve('./src/templates/doc-page.js'), + // Arguments to pass to that component's `query` + context: { + contentFileNodeID: node.id, + variables: variablesCache[variablesFilepath], + sidebar: sidebarCache[sidebarFilepath], + docinfo: { + docrootURL: docsConfig.docrootURL(node), + canonicalURL: docsConfig.canonicalURL(node), + githubURL: docsConfig.githubURL(node), + + maybeShowReadingTime: docsConfig.maybeShowReadingTime(node), - if (urlpath === '/docs/latest/quick-start/') { - actions.createPage({ - // URL-path to create the page at - path: urlpath, - // Absolute filepath of the component to render the page with - component: path.resolve('./src/templates/doc-page.js'), - // Arguments to pass to that component's `query` - context: { - contentFileNodeID: node.id, - variablesFileNodeID: variablesCache[variablesFilepath], - sidebarFileNodeID: sidebarCache[sidebarFilepath], - docinfo: { - docrootURL: docsConfig.docrootURL(node), - canonicalURL: docsConfig.canonicalURL(node), - githubURL: docsConfig.githubURL(node), - - maybeShowReadingTime: docsConfig.maybeShowReadingTime(node), - - peerVersions: docsConfig.peerVersions(urlpath, allURLPaths), - }, + peerVersions: docsConfig.peerVersions(urlPath, allURLPaths), }, - }); - } else { - actions.createRedirect({ - fromPath: urlpath, - toPath: '/docs/latest/quick-start/', - redirectInBrowser: true, - isPermanent: true, - }); - } - } + }, + }); - // Create up redirects - for (const { node } of result.data.redirectFiles.edges) { - const basepath = path.posix.dirname(docsConfig.urlpath(node)) + path.posix.sep; - for (const { from, to } of jsYAML.safeLoad(node.internal.content)) { - actions.createRedirect({ - fromPath: 
path.posix.normalize(url.resolve(basepath, from)+path.posix.sep), - toPath: url.resolve(basepath, to), - }) + const fp = path.join(nodePath, "redirects.yml") + if (fs.existsSync(fp)) { + const redirectFile = jsYAML.load(fs.readFileSync(fp)) + for (const {from, to} of redirectFile) { + actions.createRedirect({ + fromPath: path.posix.normalize(url.resolve(basepath, from) + path.posix.sep), + toPath: url.resolve(basepath, to), + }) + } } } @@ -173,8 +152,7 @@ exports.createPages = async ({ graphql, actions }) => { } // Side-wide redirects - const basepath = path.posix.sep; - for (const { from, to } of jsYAML.safeLoad(fs.readFileSync('./redirects.yml'))) { + for (const { from, to } of jsYAML.load(fs.readFileSync('./redirects.yml'))) { actions.createRedirect({ fromPath: url.resolve(basepath, from), toPath: url.resolve(basepath, to), diff --git a/package.json b/package.json index fd6044a5..f813d088 100644 --- a/package.json +++ b/package.json @@ -3,47 +3,60 @@ "private": true, "engines": { "npm": "Please use yarn instead of npm", - "yarn": "^v1.3.2", + "yarn": "^1.22.4", "node": ">=15.0.0" }, "dependencies": { - "@fontsource/inter": "^4.4.5", - "@material-ui/core": "^4.11.2", + "@fontsource/inter": "^5.0.20", + "@material-ui/core": "^4.12.3", "@material-ui/lab": "^4.0.0-alpha.57", - "@mdx-js/mdx": "^1.6.4", - "@mdx-js/react": "^1.6.4", - "@mdx-js/runtime": "^1.6.4", - "@reach/dialog": "^0.15.0", - "classnames": "^2.3.1", - "gatsby": "^2.24.2", + "@mdx-js/mdx": "^1.6.22", + "@mdx-js/react": "^1.6.22", + "@reach/dialog": "^0.16.2", + "classnames": "^2.2.6", + "gatsby": "^4.25.6", "gatsby-plugin-extract-schema": "^0.2.0", - "gatsby-plugin-google-tagmanager": "^2.3.12", - "gatsby-plugin-less": "^3.2.2", - "gatsby-plugin-mdx": "1.2.11", - "gatsby-plugin-netlify": "^2.3.11", - "gatsby-plugin-react-helmet": "^3.3.10", - "gatsby-plugin-react-svg": "^3.0.0", - "gatsby-remark-autolink-headers": "^2.3.11", - "gatsby-remark-reading-time": "^1.1.0", - "gatsby-source-filesystem": "2.3.7", + "gatsby-plugin-google-tagmanager": "^4.0.0", + "gatsby-plugin-less": "^6.0.0", + "gatsby-plugin-mdx": "^3.15.2", + "gatsby-plugin-netlify": "^4.4.0", + "gatsby-plugin-react-helmet": "^5.0.0", + "gatsby-plugin-react-svg": "^3.1.0", + "gatsby-source-filesystem": "^4.0.0", "jquery": "^3.6.0", - "less": "^3.11.3", - "mermaid": "^8.10.2", - "nanoid": "^3.1.20", - "netlify-redirect-parser": "^5.1.1", - "prism-react-renderer": "^1.2.1", - "react": "^16.13.1", - "react-dom": "^16.13.1", + "less": "^4.2.0", + "mermaid": "^11.0.2", + "nanoid": "^5.0.7", + "netlify-redirect-parser": "^14.3.0", + "prism-react-renderer": "^1.1.1", + "prismjs": "^1.27.0", + "react": "17.0.2", + "react-dom": "17.0.2", "react-github-btn": "^1.2.0", "react-helmet": "^6.1.0", - "react-hubspot-form": "datawire/datawire-react-hubspot-form.git", + "react-hubspot-form": "git+https://github.com/datawire/datawire-react-hubspot-form.git#6ed4e6c3a227ce7878f0b71b2cff39151bb1ec05", "react-intersection-observer": "^8.32.0", + "react-markdown": "^9.0.1", "react-scroll": "^1.8.2", "slick-carousel": "^1.8.1", - "typescript": "^2.8.0", - "webpack": "^4.44.1" + "typescript": "^4.4.4", + "typescript-plugin-css-modules": "^5.1.0", + "webpack": "^5.79.0" + }, + "devDependencies": { + "eslint": "^8.6.0", + "eslint-config-react-app": "^7.0.1", + "gatsby-plugin-webpack-bundle-analyser-v2": "^1.1.25" }, "resolutions": { - "socket.io-parser": "4.0.5" + "loader-utils": "2.0.4", + "parse-url": "8.1.0", + "yaml": "2.2.2", + "trim": "0.0.3", + "nth-check": "2.0.1" + }, + "repository": 
{ + "type": "git", + "url": "https://github.com/telepresenceio/telepresence.io" } } diff --git a/src/assets/icons/bug.inline.svg b/src/assets/icons/bug.inline.svg deleted file mode 100644 index dec8f109..00000000 --- a/src/assets/icons/bug.inline.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/src/assets/icons/change.inline.svg b/src/assets/icons/change.inline.svg deleted file mode 100644 index 4eb7ef85..00000000 --- a/src/assets/icons/change.inline.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/src/assets/icons/security.inline.svg b/src/assets/icons/security.inline.svg deleted file mode 100644 index 056678e9..00000000 --- a/src/assets/icons/security.inline.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - diff --git a/src/assets/icons/tada.inline.svg b/src/assets/icons/tada.inline.svg deleted file mode 100644 index b3dc2a37..00000000 --- a/src/assets/icons/tada.inline.svg +++ /dev/null @@ -1,123 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/components/CodeBlock/CodeBlock.js b/src/components/CodeBlock/CodeBlock.js index 039fd589..7d9a6c54 100644 --- a/src/components/CodeBlock/CodeBlock.js +++ b/src/components/CodeBlock/CodeBlock.js @@ -1,17 +1,14 @@ -import React from "react"; -import Highlight, { defaultProps } from "prism-react-renderer"; +import Highlight, { defaultProps } from 'prism-react-renderer'; +import React from 'react'; +import { useInView } from 'react-intersection-observer'; -import Mermaid from '../Mermaid'; - -import styles from './styles.module.less'; -import './prism-ambassador.css'; +import CodeIcon from '../../../static/images/doc-icons/code.inline.svg'; +import TerminalIcon from '../../../static/images/doc-icons/terminal.inline.svg'; import CopyButton from './CopyButton'; +import * as styles from './styles.module.less'; -import TerminalIcon from '../../../static/images/doc-icons/terminal.inline.svg'; -import CodeIcon from '../../../static/images/doc-icons/code.inline.svg'; - -const ps1regex = /^(|\/ |\/ambassador |localhost|kubernetes|@minikube\|)[$#] /; +const ps1regex = /^(\$|\/ambassador #|localhost\$|kubernetes#|@minikube\|\$) /; /* * MDX passes the code block as JSX @@ -21,7 +18,7 @@ const ps1regex = /^(|\/ |\/ambassador |localhost|kubernetes|@minikube\|)[$#] /; * The original version of this code was copied from * https://github.com/gatsbyjs/gatsby/pull/15834 but has since been modified. */ -export default (props) => { +const CodeBlock = (props) => { // When we call directly from JSX, we usually just pass // a raw string for props.children: // @@ -35,7 +32,7 @@ export default (props) => { // // MDX spits out // - //
CONTENT
+ //
CONTENT
// // except that
 is hijacked replaced with .  In that
   // case, we actually care about the  block's props, not our
@@ -59,16 +56,18 @@ export default (props) => {
     // force the doc authors to specify every time.
     language = content.match(ps1regex) ? 'console' : 'shell';
   }
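Aside (illustration only, not part of this diff): how the fallback above classifies an unlabeled block, assuming the `ps1regex` defined near the top of this file.

```js
// Illustration only: a block whose first line carries a recognized shell
// prompt is treated as an interactive console transcript; anything else is
// treated as plain shell source.
const ps1regex = /^(\$|\/ambassador #|localhost\$|kubernetes#|@minikube\|\$) /;

'$ kubectl get pods'.match(ps1regex) ? 'console' : 'shell'; // -> 'console'
'kubectl get pods'.match(ps1regex) ? 'console' : 'shell'; // -> 'shell'
```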
-
   switch (language) {
-
     case 'console':
       if (!content.match(ps1regex)) {
-        throw Error(`CodeBlock language=${language}: Could not identify PS1: ${content.split("\n")[0]}`);
+        throw Error(
+          `CodeBlock language=${language}: Could not identify PS1: ${
+            content.split('\n')[0]
+          }`,
+        );
       }
       let sections = [[]];
-      let heredoc = ''
-      for (let line of content.split("\n")) {
+      let heredoc = '';
+      for (let line of content.split('\n')) {
         let section = sections[sections.length - 1];
         section.push(line);
 
@@ -76,18 +75,18 @@ export default (props) => {
 
         if (heredoc) {
           if (line === heredoc) {
-            heredoc = ''
+            heredoc = '';
           } else {
             endOfSection = false;
           }
-        } else if (section.join("\n").match(ps1regex)) {
+        } else if (section.join('\n').match(ps1regex)) {
           // In an input block, do minimal parsing of Bash syntax to
           // determine if this is a multi-line command.
           let m = line.match(/[^<]<<([a-zA-Z_]+)$/);
           if (m) {
             heredoc = m[1];
             endOfSection = false;
-          } else if (line.endsWith("\\")) {
+          } else if (line.endsWith('\\')) {
             endOfSection = false;
           }
         }
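Aside (illustration only, not part of this diff): the two continuation cues the minimal Bash parsing above checks for when deciding whether an input command spans multiple lines; the sample command lines are hypothetical.

```js
// Illustration only: an open heredoc or a trailing backslash keeps the current
// input section open across line breaks; the heredoc closes when a later line
// is exactly the delimiter.
const heredocRe = /[^<]<<([a-zA-Z_]+)$/;

'cat <<EOF'.match(heredocRe)?.[1]; // -> 'EOF' (heredoc opened)
'kubectl apply -f - \\'.endsWith('\\'); // -> true (command continues on the next line)
```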
@@ -99,19 +98,21 @@ export default (props) => {
       return (
         
- - + + Terminal
             {sections.map((section, index) => {
-              const sectionText = section.join("\n");
+              const sectionText = section.join('\n');
               const ps1match = sectionText.match(ps1regex);
               if (ps1match) {
                 return (
                   
- +
{ language="shell" theme={undefined} > - {({ tokens, getLineProps, getTokenProps }) => ( + {({ tokens, getLineProps, getTokenProps }) => tokens.map((line, i) => { const lineProps = getLineProps({ line, key: i }); return ( -
{(i === 0) ? ps1match[0] : ""} - {line.map((token, key) => ( - - ))} +
+ {i === 0 ? ps1match[0] : ''} + {line.map((token, key) => { + let tokenProps = getTokenProps({ + token, + key, + }); + tokenProps.className = `${tokenProps.className} codeBlockText`; + return ; + })}
); }) - )} + }
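Aside (hypothetical usage sketch, not part of this diff): how a page component might call the reworked CodeBlock directly. The `language` and `lineNumber` props are the ones this file now reads (the header label and the optional line numbers in the hunk below); the import path and content strings are illustrative assumptions.

```jsx
import React from 'react';

import CodeBlock from '@src/components/CodeBlock';

// Hypothetical usage sketch. The first block's leading "$ " matches ps1regex,
// so it is auto-detected and rendered as a console transcript; the second
// passes a header label via the new `language` prop and opts in to line
// numbers via `lineNumber`.
const Example = () => (
  <>
    <CodeBlock>{'$ telepresence status'}</CodeBlock>
    <CodeBlock language="yaml" lineNumber={true}>{'apiVersion: v1'}</CodeBlock>
  </>
);

export default Example;
```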
@@ -139,7 +146,11 @@ export default (props) => { return (
{section.map((line, idx) => ( -
{line}
+
+ + {line} + +
))}
); @@ -149,30 +160,22 @@ export default (props) => {
); - case `mermaid`: - return ( - {content} - ); - default: return ( {({ tokens, getLineProps, getTokenProps }) => (
- - - {language} + + + {props.language ?? language} - +
                 {tokens.map((line, i) => {
@@ -185,11 +188,19 @@ export default (props) => {
                         className,
                       })}
                     >
-                      {line.map((token, key) => (
-                        
-                      ))}
+                      {props.lineNumber && (
+                        
+                          {i + 1}
+                        
+                      )}
+
+                      {line.map((token, key) => {
+                        let tokenProps = getTokenProps({ token, key });
+                        tokenProps.className = `${tokenProps.className} codeBlockText`;
+                        return ;
+                      })}
                     
- ) + ); })}
@@ -197,4 +208,20 @@ export default (props) => { ); } -} +}; + +const Wrapper = (props) => { + const [wrapperRef, inView] = useInView({ + triggerOnce: true, + rootMargin: '0px 0px 300px 0px', + }); + + return ( + <> + + {inView && } + + ); +}; + +export default Wrapper; diff --git a/src/components/CodeBlock/CopyButton.js b/src/components/CodeBlock/CopyButton.js index 5e757599..17a80567 100644 --- a/src/components/CodeBlock/CopyButton.js +++ b/src/components/CodeBlock/CopyButton.js @@ -1,11 +1,11 @@ import React, { useState } from 'react'; -import styles from './styles.module.less'; - -import ThumbsUpIcon from '../../../static/images/doc-icons/thumbs.inline.svg'; import ClipboardIcon from '../../../static/images/doc-icons/clipboard.inline.svg'; +import ThumbsUpIcon from '../../../static/images/doc-icons/thumbs.inline.svg'; + +import * as styles from './styles.module.less'; -const copyToClipboard = content => { +const copyToClipboard = (content) => { const el = document.createElement('textarea'); el.value = content; el.setAttribute('readonly', ''); @@ -14,17 +14,20 @@ const copyToClipboard = content => { document.body.appendChild(el); el.select(); document.execCommand('copy'); - document.body.removeChild(el) + document.body.removeChild(el); }; -const delay = duration => new Promise(resolve => setTimeout(resolve, duration)); +const delay = (duration) => + new Promise((resolve) => setTimeout(resolve, duration)); function CopyButton({ content }) { const [copied, setCopied] = useState(false); return ( - ) + ); } export default CopyButton; diff --git a/src/components/CodeBlock/SanityCodeBlock.js b/src/components/CodeBlock/SanityCodeBlock.js index 1f9bab74..283c99d8 100644 --- a/src/components/CodeBlock/SanityCodeBlock.js +++ b/src/components/CodeBlock/SanityCodeBlock.js @@ -1,10 +1,14 @@ import React from 'react'; + import CodeBlock from './CodeBlock'; -export default ({ code, language }) => { +const SanityCodeBlock = ({ code, language }) => { return (
- {code} + + {code} +
); }; +export default SanityCodeBlock; \ No newline at end of file diff --git a/src/components/CodeBlock/index.js b/src/components/CodeBlock/index.js index 6909cf91..80130dcd 100644 --- a/src/components/CodeBlock/index.js +++ b/src/components/CodeBlock/index.js @@ -1 +1,19 @@ -export { default } from './CodeBlock'; +import React from 'react'; + +const Placeholder = () => null; +const LazyCodeBlock = (props) => { + const [Component, setComponent] = React.useState(() => Placeholder); + React.useEffect(() => { + let cancelled = false + import('./CodeBlock').then((Thing) => { + if (!cancelled) { + setComponent(() => Thing.default) + } + }); + return () => { + cancelled = true + } + }, []); + return ; +}; +export default LazyCodeBlock; diff --git a/src/components/CodeBlock/prism-ambassador.css b/src/components/CodeBlock/prism-ambassador.css index 5b771377..e36ca50a 100644 --- a/src/components/CodeBlock/prism-ambassador.css +++ b/src/components/CodeBlock/prism-ambassador.css @@ -1,36 +1,35 @@ -code[class*="language-"], -pre[class*="language-"] { - color: #fff; - background: none; - font-family: 'Menlo','Consolas', monospace; - font-size: 14px; - text-align: left; - white-space: pre; - word-spacing: normal; - word-break: normal; - word-wrap: normal; - line-height: 21px; /* Keep this in-sync with @line-height in styles.module.less pre[class=language-console] */ - - -moz-tab-size: 4; - -o-tab-size: 4; - tab-size: 4; - - -webkit-hyphens: none; - -moz-hyphens: none; - -ms-hyphens: none; - hyphens: none; - +code[class*='language-'], +pre[class*='language-'] { + color: #fff; + background: none; + font-family: 'Menlo', 'Consolas', monospace; + font-size: 14px; + text-align: left; + white-space: pre; + word-spacing: normal; + word-break: normal; + word-wrap: normal; + line-height: 21px; /* Keep this in-sync with @line-height in styles.module.less pre[class=language-console] */ + + -moz-tab-size: 4; + -o-tab-size: 4; + tab-size: 4; + + -webkit-hyphens: none; + -moz-hyphens: none; + -ms-hyphens: none; + hyphens: none; } /* Code blocks */ -pre[class*="language-"] { - overflow: auto; +pre[class*='language-'] { + overflow: auto; } /* Inline code */ -:not(pre) > code[class*="language-"] { - padding: .1em; - white-space: normal; +:not(pre) > code[class*='language-'] { + padding: 0.1em; + white-space: normal; } .token.comment, @@ -38,35 +37,35 @@ pre[class*="language-"] { .token.prolog, .token.doctype, .token.cdata { - color: #999; + color: #999; } .token.punctuation { - color: #ccc; + color: #ccc; } .token.tag, .token.attr-name, .token.namespace, .token.deleted { - color: #73250e; + color: #73250e; } .token.function-name { - color: #6196cc; + color: #6196cc; } .token.boolean, .token.number, .token.function { - color: #f24e1e; + color: #f24e1e; } .token.property, .token.class-name, .token.constant, .token.symbol { - color: #f5a623; + color: #f5a623; } .token.selector, @@ -74,7 +73,7 @@ pre[class*="language-"] { .token.atrule, .token.keyword, .token.builtin { - color: #af5cf8; + color: #795cec; } .token.string, @@ -82,27 +81,27 @@ pre[class*="language-"] { .token.attr-value, .token.regex, .token.variable { - color: #00c05b; + color: #00c05b; } .token.operator, .token.entity, .token.url { - color: #0d5f80; + color: #0d5f80; } .token.important, .token.bold { - font-weight: bold; + font-weight: bold; } .token.italic { - font-style: italic; + font-style: italic; } .token.entity { - cursor: help; + cursor: help; } .token.inserted { - color: #00c05b; + color: #00c05b; } diff --git 
a/src/components/CodeBlock/styles.module.less b/src/components/CodeBlock/styles.module.less index 599bac50..59e464b0 100644 --- a/src/components/CodeBlock/styles.module.less +++ b/src/components/CodeBlock/styles.module.less @@ -1,4 +1,4 @@ -@import '../Layout/vars.less'; +@import '~@src/components/Layout/vars.less'; @line-height: 24px; @codeblock-header-height: 36px; @@ -7,224 +7,254 @@ @copybutton-height: 30px; .CopyButton { - background: none; - border: 0; - cursor: pointer; - height: @copybutton-height; - outline: 0; + background: none; + border: 0; + cursor: pointer; + height: @copybutton-height; + outline: 0; + position: absolute; + right: 6px; + text-align: center; + top: calc((@codeblock-header-height - @copybutton-height) / 2); // 3px + width: @copybutton-width; + + .CopyButton__icons { + height: 30px; + left: 0; + overflow: hidden; position: absolute; - right: 6px; - text-align: center; - top: calc((@codeblock-header-height - @copybutton-height) / 2); // 3px - width: @copybutton-width; - - .CopyButton__icons { - height: 30px; - left: 0; - overflow: hidden; - position: absolute; - top: 0; - width: 30px; - } - - .CopyButton__mover { - height: 60px; - left: 50%; - position: absolute; - top: 0; - transform: translate3d(-50%, 0%, 0); - } - - svg { - display: block; - fill: @purple-electric; - height: 30px; - margin: 0; - transition: fill @copybutton-hover-transition; - width: 20px; - } + top: 0; + width: 30px; + } - &:hover svg { - fill: @purple; + .CopyButton__mover { + height: 60px; + left: 50%; + position: absolute; + top: 0; + transform: translate3d(-50%, 0%, 0); + } + + svg { + display: block; + fill: @purple-electric; + height: 30px; + margin: 0; + transition: fill @copybutton-hover-transition; + width: 20px; + } + + &:hover svg { + fill: @purple; + } + + span { + color: @white; + font-size: 14px; + opacity: 0; + pointer-events: none; + position: absolute; + right: 100%; + top: 50%; + transform: translate3d(0, -50%, 0); + } + + &.CopyButton__suceeded .CopyButton__mover { + animation-duration: 1.5s; + animation-name: iconIt; + animation-timing-function: ease-in-out; + } + + @keyframes iconIt { + 0% { + transform: translate3d(-50%, 0%, 0); } - - span { - color: @white; - font-size: 14px; - opacity: 0; - pointer-events: none; - position: absolute; - right: 100%; - top: 50%; - transform: translate3d(0, -50%, 0); + 8% { + transform: translate3d(-50%, -50%, 0); } - - &.CopyButton__suceeded .CopyButton__mover { - animation-duration: 1.5s; - animation-name: iconIt; - animation-timing-function: ease-in-out; + 90% { + transform: translate3d(-50%, -50%, 0); } - - @keyframes iconIt { - 0% { transform: translate3d(-50%, 0%, 0); } - 8% { transform: translate3d(-50%, -50%, 0); } - 90% { transform: translate3d(-50%, -50%, 0); } - 99% { transform: translate3d(-50%, -100%, 0); } - 100% { transform: translate3d(-50%, 0%, 0); } + 99% { + transform: translate3d(-50%, -100%, 0); } - - &.CopyButton__suceeded span { - animation-duration: 1.5s; - animation-name: identifyIt; - animation-timing-function: ease-in-out; + 100% { + transform: translate3d(-50%, 0%, 0); } + } + + &.CopyButton__suceeded span { + animation-duration: 1.5s; + animation-name: identifyIt; + animation-timing-function: ease-in-out; + } - @keyframes identifyIt { - 0% { opacity: 0; } - 50% { opacity: 1; } - 90% { opacity: 1; } - 100% { opacity: 0; } + @keyframes identifyIt { + 0% { + opacity: 0; + } + 50% { + opacity: 1; } + 90% { + opacity: 1; + } + 100% { + opacity: 0; + } + } } .CodeBlock { - position: relative; + position: relative; 
+} + +.LineNumber { + margin-right: 1rem; + min-width: 1.25rem; + min-width: 1.25rem; + display: inline-block; + text-align: left; + color: @grey-dark-md; } .CodeBlock__light, .CodeBlock__dark { - @codeblock-border-radius: 5px; + @codeblock-border-radius: 5px; + + border-radius: @codeblock-border-radius; + margin: 20px 0; + padding: 0 0 3px; + position: relative; - border-radius: @codeblock-border-radius; - margin: 20px 0; - padding: 0 0 3px; + .CodeBlock__header { + border-radius: @codeblock-border-radius @codeblock-border-radius 0 0; + height: @codeblock-header-height; position: relative; - .CodeBlock__header { - border-radius: @codeblock-border-radius @codeblock-border-radius 0 0; - height: @codeblock-header-height; - position: relative; - - span { - display: inline-block; - font-size: 14px; - line-height: @line-height; - margin: calc((@codeblock-header-height - @line-height) / 2) 0 0 12px; // 6px 0 0 12px - - svg { - display: inline; - height: 18px; - margin: 0 10px 0 0; - vertical-align: middle; - width: 18px; - } - } + span { + display: inline-block; + font-size: 14px; + line-height: @line-height; + margin: calc((@codeblock-header-height - @line-height) / 2) 0 0 12px; // 6px 0 0 12px + + svg { + display: inline; + height: 18px; + margin: 0 10px 0 0; + vertical-align: middle; + width: 18px; + } } - - pre[class*=language-] { - margin: 5px; - overflow: auto; - padding: 10px 25px; + } + + pre[class*='language-'] { + margin: 5px; + overflow-x: auto; + overflow-y: hidden; + padding: 10px 25px; + } + + // Use "[class=whatever]" instead of ".whatever" so that the names + // don't get mangled. + pre[class='language-console'] { + @line-height: 21px; // Keep this in-sync with line-height in prism-ambassador.css pre[class*="language-"]. + + // Allow some extra padding because of the border we draw when hovering over input. + @codeinput-vpad: 3px; + @codeinput-hpad: 5px; + @codeinput-border-width: 1px; + @codeinput-min-height: calc( + @line-height + ((@codeinput-border-width + @codeinput-vpad) * 2) + ); // Make some math below easier. + + div[class*='console-output'] { + padding: 0 @codeinput-hpad; } + div[class*='console-input'] { + position: relative; // Grab the CopyButton; don't let it float up to the parent. - // Use "[class=whatever]" instead of ".whatever" so that the names - // don't get mangled. - pre[class=language-console] { - @line-height: 21px; // Keep this in-sync with line-height in prism-ambassador.css pre[class*="language-"]. + .CopyButton { + top: calc((@codeinput-min-height - @copybutton-height) / 2); + left: -@copybutton-width; - // Allow some extra padding because of the border we draw when hovering over input. - @codeinput-vpad: 3px; - @codeinput-hpad: 5px; - @codeinput-border-width: 1px; - @codeinput-min-height: calc(@line-height + ((@codeinput-border-width + @codeinput-vpad) * 2)); // Make some math below easier. + // Add the border. + & ~ div[class='copy-content'] { + transition: all @copybutton-hover-transition; + border: solid @codeinput-border-width @grey-separator; + border-radius: 5px; - div[class*=console-output] { - padding: 0 @codeinput-hpad; + padding: @codeinput-vpad @codeinput-hpad; + + display: inline-block; // If a line of text grows too long, let the box grow; don't graw the border through some text... + min-width: 100%; // ...but don't let display:inline-block make it narrower than it would be otherwise. } - div[class*=console-input] { - position: relative; // Grab the CopyButton; don't let it float up to the parent. 
- - .CopyButton { - top: calc((@codeinput-min-height - @copybutton-height) / 2); - left: -@copybutton-width; - - // Add the border. - & ~ div[class=copy-content] { - transition: all @copybutton-hover-transition; - border: solid @codeinput-border-width @grey-separator; - border-radius: 5px; - - padding: @codeinput-vpad @codeinput-hpad; - - display: inline-block; // If a line of text grows too long, let the box grow; don't graw the border through some text... - min-width: 100%; // ...but don't let display:inline-block make it narrower than it would be otherwise. - } - &:hover ~ div[class=copy-content] { - border-color: @purple; - } - } + &:hover ~ div[class='copy-content'] { + border-color: @purple; } + } } + } } .CodeBlock__light { - background-color: @grey-surface; - border: solid 1px @grey-separator; + background-color: @grey-surface; + border: solid 1px @grey-separator; - .CodeBlock__header { - background-color: darken(@grey-surface, 2%); + .CodeBlock__header { + background-color: darken(@grey-surface, 2%); - span { - color: @grey-primary; + span { + color: @grey-primary; - svg { - fill: @grey-primary; - } - } + svg { + fill: @grey-primary; + } + } - .CopyButton { - svg { - fill: @purple; - } + .CopyButton { + svg { + fill: @purple; + } - &:hover svg { - fill: @purple-dark; - } + &:hover svg { + fill: @purple-dark; + } - span { - color: @grey-primary; - } - } + span { + color: @grey-primary; + } } + } - code[class*="language-"], - pre[class*="language-"] { - color: @grey-dark; - } + code[class*='language-'], + pre[class*='language-'] { + color: @grey-dark; + } } .CodeBlock__dark { - background-color: @black; - box-shadow: 2px 4px 8px rgba(39, 39, 51, 0.25); + background-color: @black; + box-shadow: 2px 4px 8px rgba(39, 39, 51, 0.25); - .CodeBlock__header { - background-color: @purple-dark; + .CodeBlock__header { + background-color: @purple-dark; - span { - color: @purple-electric; + span { + color: @purple-electric; - svg { - fill: @purple-electric; - } - } + svg { + fill: @purple-electric; + } } + } } ol { - li { - .CodeBlock__light, - .CodeBlock__dark { - margin-right: 20px; - } + li { + .CodeBlock__light, + .CodeBlock__dark { + margin-right: 20px; } + } } diff --git a/src/components/Docs/Telepresence/ClusterConfig.js b/src/components/Docs/Telepresence/ClusterConfig.js deleted file mode 100644 index 285b95b1..00000000 --- a/src/components/Docs/Telepresence/ClusterConfig.js +++ /dev/null @@ -1,23 +0,0 @@ -import React from 'react'; - - -const ClusterConfig = () => { - const customText = 'Log in and Go to '; - - return ( -

- {customText} - - the teams setting page in Ambassador Cloud - {' '} - and select Licenses for the team you want to create the license - for. -

- ); -}; - -export { ClusterConfig }; diff --git a/src/components/Docs/Telepresence/index.js b/src/components/Docs/Telepresence/index.js deleted file mode 100644 index 22ee165a..00000000 --- a/src/components/Docs/Telepresence/index.js +++ /dev/null @@ -1 +0,0 @@ -export { ClusterConfig } from './ClusterConfig'; diff --git a/src/components/EasyLayout/index.js b/src/components/EasyLayout/index.js index 1d9c2920..6c9d640a 100644 --- a/src/components/EasyLayout/index.js +++ b/src/components/EasyLayout/index.js @@ -25,7 +25,7 @@ export default function EasyLayout({ {title} | {data.site.siteMetadata.title} - + { description && } {children} diff --git a/src/components/Embed/Embed.js b/src/components/Embed/Embed.js new file mode 100644 index 00000000..a4f2061c --- /dev/null +++ b/src/components/Embed/Embed.js @@ -0,0 +1,71 @@ +import classnames from 'classnames'; +import React, { useEffect } from 'react'; +import { useInView } from 'react-intersection-observer'; + +import { YOUTUBE_LITE_URL } from '../../utils/urls'; + +import './lite-yt-embed.css'; +import * as styles from './styles.module.less'; + +function isYoutubeVideo(url) { + return ( + url?.includes('https://www.youtube.com/embed/') || + url?.includes('https://www.youtube-nocookie.com/embed/') + ); +} + +const addScript = () => { + const script = document.createElement('script'); + script.setAttribute('src', YOUTUBE_LITE_URL); + script.setAttribute('type', 'text/javascript'); + script.setAttribute('async', true); + document.body.appendChild(script); +}; + +const Embed = ({ code, customStyles = '' }) => { + const [ref, inView] = useInView({ + triggerOnce: true, + }); + + useEffect(() => { + addScript(); + }, []); + + if (!code) { + return null; + } + + const iFrameSrcRegEx = new RegExp(/src="([^"]*)"|src='([^']*)'/g); + const [, url] = iFrameSrcRegEx.exec(code) || []; + const baseClass = classnames( + customStyles, + styles.wrapper, + 'contained', + 'contained_lg', + ); + // Treat YouTube embeds in a special way + if (isYoutubeVideo(url)) { + const urlArr = url.split('/'); + const youtubeId = urlArr[urlArr.length - 1]; + + return ( +
+ {inView && ( + + )} +
+ ); + } + return ( +
+ ); +}; + +export default Embed; diff --git a/src/components/Embed/index.js b/src/components/Embed/index.js new file mode 100644 index 00000000..30f95b44 --- /dev/null +++ b/src/components/Embed/index.js @@ -0,0 +1 @@ +export { default } from './Embed'; diff --git a/src/components/Embed/lite-yt-embed.css b/src/components/Embed/lite-yt-embed.css new file mode 100644 index 00000000..10b434b0 --- /dev/null +++ b/src/components/Embed/lite-yt-embed.css @@ -0,0 +1,86 @@ +lite-youtube { + background-color: #000; + position: relative; + display: block; + contain: content; + background-position: center center; + background-size: cover; + cursor: pointer; + max-width: 800px; +} + +/* gradient */ +lite-youtube::before { + content: ''; + display: block; + position: absolute; + top: 0; + background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAADGCAYAAAAT+OqFAAAAdklEQVQoz42QQQ7AIAgEF/T/D+kbq/RWAlnQyyazA4aoAB4FsBSA/bFjuF1EOL7VbrIrBuusmrt4ZZORfb6ehbWdnRHEIiITaEUKa5EJqUakRSaEYBJSCY2dEstQY7AuxahwXFrvZmWl2rh4JZ07z9dLtesfNj5q0FU3A5ObbwAAAABJRU5ErkJggg==); + background-position: top; + background-repeat: repeat-x; + height: 60px; + padding-bottom: 50px; + width: 100%; + transition: all 0.2s cubic-bezier(0, 0, 0.2, 1); +} + +/* responsive iframe with a 16:9 aspect ratio + thanks https://css-tricks.com/responsive-iframes/ +*/ +lite-youtube::after { + content: ''; + display: block; + padding-bottom: calc(100% / (16 / 9)); +} +lite-youtube > iframe { + width: 100%; + height: 100%; + position: absolute; + top: 0; + left: 0; + border: 0; +} + +/* play button */ +lite-youtube > .lty-playbtn { + display: block; + width: 68px; + height: 48px; + position: absolute; + cursor: pointer; + transform: translate3d(-50%, -50%, 0); + top: 50%; + left: 50%; + z-index: 1; + background-color: transparent; + /* YT's actual play button svg */ + background-image: url('data:image/svg+xml;utf8,'); + filter: grayscale(100%); + transition: filter 0.1s cubic-bezier(0, 0, 0.2, 1); + border: none; +} + +lite-youtube:hover > .lty-playbtn, +lite-youtube .lty-playbtn:focus { + filter: none; +} + +/* Post-click styles */ +lite-youtube.lyt-activated { + cursor: unset; +} +lite-youtube.lyt-activated::before, +lite-youtube.lyt-activated > .lty-playbtn { + opacity: 0; + pointer-events: none; +} + +.lyt-visually-hidden { + clip: rect(0 0 0 0); + clip-path: inset(50%); + height: 1px; + overflow: hidden; + position: absolute; + white-space: nowrap; + width: 1px; +} diff --git a/src/components/Embed/styles.module.less b/src/components/Embed/styles.module.less new file mode 100644 index 00000000..ca3bb2e9 --- /dev/null +++ b/src/components/Embed/styles.module.less @@ -0,0 +1,25 @@ +.wrapper { + max-width: 800px; + margin: 1.5em auto 2.5em; + padding: 0 20px; + text-align: center; +} + +.sixteenByNine { + // Maintain the iframe's 16/9 proportions + position: relative; + iframe { + position: absolute; + left: 0; + top: 0; + width: 100%; + height: 100%; + } + lite-youtube { + width: 100%; + height: 100%; + } + > div:first-of-type { + padding-bottom: 56.25%; + } +} diff --git a/src/components/InterceptAnimation/InterceptsAnimation.tsx b/src/components/InterceptAnimation/InterceptsAnimation.tsx new file mode 100644 index 00000000..4c6e8b6d --- /dev/null +++ b/src/components/InterceptAnimation/InterceptsAnimation.tsx @@ -0,0 +1,29 @@ +import * as React from 'react'; + +import InterceptAnimationSVG from '../../assets/images/intercept-animation.inline.svg'; + +function Animation(props: React.JSX.IntrinsicAttributes) { + const el = 
React.useRef(null); + React.useEffect(() => { + const queueAnimation = () => { + setTimeout(() => { + el.current?.getAnimations({ subtree: true })?.forEach((anim) => { + anim.finish(); + anim.play(); + }); + queueAnimation(); + }, 3000); + }; + queueAnimation(); + }, [el]); + return ( +
+ +
+ ); +} + +export default Animation; diff --git a/src/components/InterceptAnimation/index.tsx b/src/components/InterceptAnimation/index.tsx new file mode 100644 index 00000000..320923d0 --- /dev/null +++ b/src/components/InterceptAnimation/index.tsx @@ -0,0 +1,27 @@ +import * as React from 'react'; + +function InterceptAnimationLazy(props: React.JSX.IntrinsicAttributes) { + const [Component, setComponent] = React.useState(null); + const [ReactSuspense, setSuspense] = React.useState(null); + + const FallBack = () =>
Loading...
; + + React.useEffect(() => { + const lazyComp = React.lazy(() => import(`./InterceptsAnimation`)); + setComponent(lazyComp); + setSuspense(React.Suspense); + }, []); + return ( + <> + {Component && ReactSuspense ? ( + }> + + + ) : ( + + )} + + ); +} + +export default InterceptAnimationLazy; diff --git a/src/components/Layout/layout.less b/src/components/Layout/layout.less index eb22b34d..4cbd7966 100644 --- a/src/components/Layout/layout.less +++ b/src/components/Layout/layout.less @@ -2,6 +2,15 @@ @import './vars.less'; // @import './interFont.css'; +::-webkit-scrollbar { + -webkit-appearance: none; + width: 9px; +} +::-webkit-scrollbar-thumb { + border-radius: 5px; + background-color: rgba(0, 0, 0, .5); +} + /* source-sans-pro-300 - latin */ @font-face { font-family: 'Source Sans Pro'; diff --git a/src/components/Link/index.js b/src/components/Link/index.tsx similarity index 69% rename from src/components/Link/index.js rename to src/components/Link/index.tsx index 889c86b4..df3659c7 100644 --- a/src/components/Link/index.js +++ b/src/components/Link/index.tsx @@ -1,14 +1,13 @@ -import React from 'react'; +import * as React from 'react'; import { Link as GatsbyLink } from 'gatsby'; import { Link as ScrollLink } from 'react-scroll'; -import url from 'url'; const Link = ({ children, ...props}) => { - const to = props.to || props.href; + let to = props.to || props.href; if (!to) { // not a link return {children}; - } else if (url.parse(to).protocol || props.target === "_blank") { + } else if (to.indexOf('http://') === 0 || to.indexOf('https://') === 0 || props.target === "_blank") { // external link props.target = "_blank"; props.rel = "nofollow noopener noreferrer"; @@ -22,9 +21,10 @@ const Link = ({ children, ...props}) => { return {children} } else { // internal link to a different page - props.to = to; - delete props.href; - return {children} + if (to.endsWith('.md')) { + to = to.slice(0, -3) + } + return {children} } }; diff --git a/src/components/MainNav/MainNav.js b/src/components/MainNav/MainNav.js index 536eff70..65a533d3 100644 --- a/src/components/MainNav/MainNav.js +++ b/src/components/MainNav/MainNav.js @@ -2,8 +2,7 @@ import React from 'react'; import '../Layout/layout.less'; const LINKS = [ - { label: 'Quick Start', link: '/docs/latest/quick-start/' }, - { label: 'Docs', link: 'https://www.getambassador.io/docs/telepresence-oss/' }, + { label: 'Docs', link: '/docs/latest/quick-start/' }, { label: 'Case Studies', link: '/case-studies' }, { label: 'Community', link: '/community' }, { label: 'About', link: '/about' }, diff --git a/src/components/Markdown/index.js b/src/components/Markdown/index.js deleted file mode 100644 index c143f318..00000000 --- a/src/components/Markdown/index.js +++ /dev/null @@ -1,18 +0,0 @@ -import React from 'react'; -import MDX from '@mdx-js/runtime'; - -import CodeBlock from '../CodeBlock'; -import Link from '../Link'; - -export const components = { - // Override default markdown output. - 'pre': CodeBlock, - 'a': Link, - - // Add new custom components. 
- // (none right now) -}; - -export default function Markdown({ children }) { - return {children}; -} diff --git a/src/components/Platform/index.js b/src/components/Platform/index.js index 1126f707..235f2da4 100644 --- a/src/components/Platform/index.js +++ b/src/components/Platform/index.js @@ -5,10 +5,10 @@ import TabList from '@material-ui/lab/TabList'; import TabPanel from '@material-ui/lab/TabPanel'; import React from 'react'; +import * as styles from './styles.module.less'; import * as allTabs from './tabs'; -import styles from './styles.module.less'; -let publicTabs = {...allTabs}; +let publicTabs = { ...allTabs }; delete publicTabs.AbstractTab; delete publicTabs.UnknownTab; @@ -28,8 +28,8 @@ function detectUserOS(window) { function isValidTab(element) { return ( - React.isValidElement(element) && - element.type.prototype instanceof allTabs.AbstractTab + React.isValidElement(element) && + element.type.prototype instanceof allTabs.AbstractTab ); } @@ -44,9 +44,9 @@ function Provider({ children, ...props }) { if (!state.setTab) { state.setTab = (newTab) => { window.history.replaceState( - null, - '', - `?os=${newTab}${window.location.hash}`, + null, + '', + `?os=${newTab}${window.location.hash}`, ); setState({ curTab: newTab, @@ -57,7 +57,11 @@ function Provider({ children, ...props }) { React.useEffect(() => { const query = new URLSearchParams(window.location.search); - if (Object.values(publicTabs).map((cls) => cls.slug).includes(query.get('os'))) { + if ( + Object.values(publicTabs) + .map((cls) => cls.slug) + .includes(query.get('os')) + ) { if (state.doAutoDetect || state.curTab !== query.get('os')) { setState({ curTab: query.get('os'), @@ -89,7 +93,7 @@ function TabGroup({ children, ...props }) { const slugs = new Set(children.map((child) => child.type.slug)); if (slugs.size < children.length) { throw new Error( - 'Platform.TabGroup: Has multiple children of the same type', + 'Platform.TabGroup: Has multiple children of the same type', ); } @@ -100,33 +104,31 @@ function TabGroup({ children, ...props }) { }); let { curTab, setTab } = React.useContext(Context); - + if (!curTab) { // This is essentially the noscript case, which is important to // support because of the broken link checker. return ( -
- {sortedChildren.map((child) => { - const Icon = child.type.icon; - return ( -
- - - {child.type.label} - -
- {child.props.children} -
-
- ); - })} -
+
+ {sortedChildren.map((child) => { + const Icon = child.type.icon; + return ( +
+ + + {child.type.label} + +
{child.props.children}
+
+ ); + })} +
); } if (!slugs.has(curTab)) { const defaultChild = [...children].sort( - (a, b) => b.type.priority - a.type.priority, + (a, b) => b.type.priority - a.type.priority, )[0]; curTab = defaultChild.type.slug; } @@ -136,42 +138,44 @@ function TabGroup({ children, ...props }) { }; return ( -
- - - - {sortedChildren.map((child) => { - const Icon = child.type.icon; - return ( - } - label={child.type.label} - className={styles.TabHead} - /> - ); - })} - - - {sortedChildren.map((child) => { - return ( - - {child.props.children} - - ); - })} - -
+
+ + + + {sortedChildren.map((child) => { + const Icon = child.type.icon; + return ( + } + label={child.type.label} + className={styles.TabHead} + /> + ); + })} + + + {sortedChildren.map((child) => { + return ( + + {child.props.children} + + ); + })} + +
); } -export default { +const Platform = { Provider, TabGroup, ...publicTabs, }; + +export default Platform; \ No newline at end of file diff --git a/src/components/Platform/tabs.js b/src/components/Platform/tabs.js index ff14bd81..88095c0e 100644 --- a/src/components/Platform/tabs.js +++ b/src/components/Platform/tabs.js @@ -4,6 +4,7 @@ import AppleIcon from '@src/assets/icons/apple.inline.svg'; import LinuxIcon from '@src/assets/icons/linux.inline.svg'; import WindowsIcon from '@src/assets/icons/windows.inline.svg'; + // prettier-ignore export class AbstractTab extends React.Component { render() { @@ -49,6 +50,33 @@ export class GNULinuxTab extends AbstractTab { } } +export class GNULinuxAMD64Tab extends AbstractTab { + static get order() { return 1; } + static get priority() { return 5; } + static get slug() { return 'gnu-linux-amd64'; } + + static get icon() { return LinuxIcon; } + static get label() { return 'GNU/Linux amd64'; } + + static detect(window) { + return /Linux/.test(window.navigator.platform); + } +} + +export class GNULinuxARM64Tab extends AbstractTab { + static get order() { return 2; } + static get priority() { return 4; } + static get slug() { return 'gnu-linux-arm64'; } + + static get icon() { return LinuxIcon; } + static get label() { return 'GNU/Linux arm64'; } + + static detect(window) { + return /Linux/.test(window.navigator.platform); + } +} + + // prettier-ignore export class MacOSTab extends AbstractTab { static get order() { return 2; } @@ -63,6 +91,45 @@ export class MacOSTab extends AbstractTab { } } +export class MacOSAMD64Tab extends AbstractTab { + static get order() { return 3; } + static get priority() { return 3; } + static get slug() { return 'macos-amd64'; } + + static get icon() { return AppleIcon; } + static get label() { return 'macOS Intel (amd64)'; } + + static detect(window) { + return /Mac(intosh|Intel|PPC|68K)/.test(window.navigator.platform); + } +} + +export class MacOSARM64Tab extends AbstractTab { + static get order() { return 4; } + static get priority() { return 2; } + static get slug() { return 'macos-arm64'; } + + static get icon() { return AppleIcon; } + static get label() { return 'macOS M Series (arm64)'; } + + static detect(window) { + return /Mac(intosh|Intel|PPC|68K)/.test(window.navigator.platform); + } +} + +export class WindowsTabAMD64 extends AbstractTab { + static get order() { return 5; } + static get priority() { return 1; } + static get slug() { return 'windows'; } + + static get icon() { return WindowsIcon; } + static get label() { return 'Windows'; } + + static detect(window) { + return /Win(dows|32|64|CE)/.test(window.navigator.platform); + } +} + // prettier-ignore export class WindowsTab extends AbstractTab { static get order() { return 3; } diff --git a/src/components/ReleaseNotes/Note.tsx b/src/components/ReleaseNotes/Note.tsx deleted file mode 100644 index aa2ce958..00000000 --- a/src/components/ReleaseNotes/Note.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import React from 'react'; -import url from 'url'; - -import Link from '../Link'; - -import BugIcon from '@src/assets/icons/bug.inline.svg'; -import ChangeIcon from '@src/assets/icons/change.inline.svg'; -import TadaIcon from '@src/assets/icons/tada.inline.svg'; -import SecurityIcon from '@src/assets/icons/security.inline.svg'; - -import styles from './releaseNotes.module.less'; - -const titlePrefix = { - bugfix: 'Bug Fix', - change: 'Change', - feature: 'Feature', - security: 'Security Update', -}; - -const typeIcon = { - bugfix: , - change: , - feature: , - security: 
, -}; - -const Note = ({ note }) => { - const title = (titlePrefix[note.type] ? `${titlePrefix[note.type]}: ` : ``) + note.title; - - return ( - -

{typeIcon[note.type]}{title}

-
- { - note.image && ( - {title} - ) - } - - ); -}; - -export default Note; diff --git a/src/components/ReleaseNotes/Release.tsx b/src/components/ReleaseNotes/Release.tsx deleted file mode 100644 index 36b415ff..00000000 --- a/src/components/ReleaseNotes/Release.tsx +++ /dev/null @@ -1,53 +0,0 @@ -import React from 'react'; -import Note from './Note'; -import styles from './releaseNotes.module.less'; - -const month = [ - 'January', - 'February', - 'March', - 'April', - 'May', - 'June', - 'July', - 'August', - 'September', - 'October', - 'November', - 'December', -]; - -const Release = ({ release }) => { - const formattedDate = (() => { - if (release.date) { - const [yyyy, mm, dd] = release.date.split('-'); - if (yyyy && mm && dd) { - return `${month[Number(mm - 1)]} ${dd}, ${yyyy}`; - } - } - return ''; - })(); - - return ( -
-

- {release.version && ( - <> - Version {release.version}{' '} - {formattedDate && ( - ({formattedDate}) - )} - - )} - {!release.version && formattedDate} -

-
- {release.notes.map((note, index) => ( - - ))} -
-
- ); -}; - -export default Release; diff --git a/src/components/ReleaseNotes/_colors.less b/src/components/ReleaseNotes/_colors.less deleted file mode 100644 index 0d706fda..00000000 --- a/src/components/ReleaseNotes/_colors.less +++ /dev/null @@ -1,37 +0,0 @@ -//font -@InterFont: 'Inter', arial, helvetica, sans-serif; - -@color-black-1: #272733; -@color-black-2: #212121; -@color-black: #000000; -@color-blue-dark: #003380; -@color-blue-light: #599CFF; -@color-blue-surface: #e9f6ff; -@color-blue: #0066FF; -@color-gray-dark: #595F61; -@color-gray-light: #8091A9; -@color-gray: #7C7C87; -@color-green-dark-2: #00401E; -@color-green-dark-3: #0D5F80; -@color-green-dark: #00C05B; -@color-green-light: #9EF0B7; -@color-green-surface: #EDF7ED; -@color-green: #50FF8F; -@color-orange-dark: #F24E1E; -@color-orange: #FA7360; -@color-purple-dark: #552C78; -@color-purple-light: #C98BFF; -@color-purple-surface: #faf4ff; -@color-purple: #AF5CF8; -@color-red-primary: #DB210F; -@color-red-dark: #5C0E06; -@color-red-electric: #e24d3f; -@color-red-surface: #fdecea; -@color-white-1: #E1E9EA; -@color-white-2: #F7F7FA; -@color-white-3: #D8DEE3; -@color-white: #FFFFFF; -@color-wine-dark: #73250E; -@color-wine: #732615; -@color-yellow-dark: #F5A623; -@color-yellow: #F8C369; diff --git a/src/components/ReleaseNotes/releaseNotes.module.less b/src/components/ReleaseNotes/releaseNotes.module.less deleted file mode 100644 index a5a163e3..00000000 --- a/src/components/ReleaseNotes/releaseNotes.module.less +++ /dev/null @@ -1,82 +0,0 @@ -@import './_colors.less'; - -/* Use [class="docs"] selectors to be more specific than docs-layout.less. */ -[class="docs"] [class="docs__main"] .note { - /* The .note is a , style it to look less like a regular link. */ - text-decoration: none; - color: @color-black-1; - &_withlink:hover { - color: @color-blue-dark; - } - - @icon-width: 20px; - @icon-height: 20px; - @icon-gap: 10px; - - margin: 5px 0; /* how much space to put between grid items */ - padding: 0; - - display: grid; - &_withimage { - grid-template-columns: auto 35%; - } - &_withoutimage { - grid-template-columns: auto; - } - grid-column-gap: 10px; - grid-row-gap: 8px; - - h3 { - grid-row: 1; - grid-column: 1; - - align-self: end; - padding: 0; - color: inherit; - line-height: @icon-height; - text-decoration: underline; - padding-left: @icon-width + @icon-gap; - svg { - grid-row: 1; - grid-column: 1; - - width: @icon-width; - height: @icon-height; - margin-left: -(@icon-width+@icon-gap); - margin-right: @icon-gap; - } - } - &_withlink:hover h3 { - text-decoration: none; - } - - &__body { - grid-row: 2; - grid-column: 1; - - align-self: start; - padding-left: @icon-width + @icon-gap; - } - - img { - grid-row-start: 1; - grid-row-end: 3; - grid-column: 2; - - width: 100%; - height: 100%; - max-height: 172px; - object-fit: contain; - } - - -} - -.release { - padding: 0 24px 16px; - border-bottom: 1px solid @color-white-1; - - &__date { - font-size: 16px; - } -} diff --git a/src/custom.d.ts b/src/custom.d.ts new file mode 100644 index 00000000..6db67102 --- /dev/null +++ b/src/custom.d.ts @@ -0,0 +1,4 @@ +declare module "*.svg" { + const content: React.FunctionComponent>; + export default content; +} \ No newline at end of file diff --git a/src/pages/announcing-telepresence-2.js b/src/pages/announcing-telepresence-2.js deleted file mode 100644 index 03d63738..00000000 --- a/src/pages/announcing-telepresence-2.js +++ /dev/null @@ -1,58 +0,0 @@ -import React from 'react'; - -import EasyLayout from '../components/EasyLayout'; -import 
Markdown from '../components/Markdown'; - -export default function Tel2Page({ location }) { - return ( - -
- {/* FIXME: It's stupid to put docs__main here, but reset.css is a pain and breaks everything until you fix it */} -
- {` - -# Announcing Telepresence 2! - -In November 2020 we released Telepresence 2.0.0 to a small early-access -audience, and have been increasing its exposure ever since then. We've had a -banner on the web-page promoting v2, and have been encouraging community members -to try upgrading from v1; but because v2 is such a big change, we had been -holding of on switching the "default" Telepresence to be v2; v1 was still what -was on the homepage, and was still what \`master\` was in Git. - -Well, over the last 6 months we've made massive strides with v2, and have -finally made the switch-over: Changing the website and Git to be oriented around -v2. - -As a regular end-user of Telepresence, you should find everything you need in -the [upgrading documentation](/docs/latest/install/migrate-from-legacy/). - -As a developer of Telepresence, you may notice some funny business with your -existing Git checkout of Telepresence. We've made a few changes to the Git -repo: - - - We've renamed the old \`master\` branch to \`release/v1\`; there is no longer a - \`master\` branch. - - We've changed the default branch to be \`release/v2\` - -All existing GitHub pull requests for v1 have automatically been re-targetted -from \`master\` to \`release/v1\`. - -If you have an existing local Git checkout, the next time you \`git fetch\`, -you'll want to pass the \`--prune\` flag to clean up anything pointing to the old -\`master\` branch. - -You should be able to switch your checkout v2 as simply as \`git checkout -release/v2\` (after fetching, of course). However, be warned: Don't try to -\`rebase\` any of your v1 work on to v2. They do not share any Git history, and -moreover v2 is a complete rewrite in a different language. - -If you have any questions, please reach out to us on GitHub or in the -#telepresence-oss channel on our [Community Slack](https://slack.cncf.io/). - - `} -
-
-
-  );
-}
diff --git a/src/pages/related-projects.js b/src/pages/related-projects.js
deleted file mode 100644
index 7c326fa0..00000000
--- a/src/pages/related-projects.js
+++ /dev/null
@@ -1,20 +0,0 @@
-import React from 'react';
-
-import EasyLayout from '../components/EasyLayout';
-import Markdown from '../components/Markdown';
-
-export default function RelatedProjectsPage({ location }) {
-  return (
-
-    {`
-# Related Projects
-
-Ambassador Labs has a number of open source projects that are designed to improve the developer workflow on Kubernetes.
-
-* [Forge](https://forge.sh) allows developers to define and deploy multi-container applications into Kubernetes, from source, incredibly fast.
-
-* [Ambassador](https://www.getambassador.io) is a Kubernetes-native API Gateway built on the [Envoy Proxy](https://envoyproxy.github.io), designed for microservices.
-`}
-
-  );
-}
diff --git a/src/templates/doc-page.js b/src/templates/doc-page.js
index f81852f7..2015eb16 100644
--- a/src/templates/doc-page.js
+++ b/src/templates/doc-page.js
@@ -1,23 +1,41 @@
 import React from 'react';
-import { graphql } from 'gatsby';
+import { graphql, navigate } from 'gatsby';
 import { Helmet } from 'react-helmet';
 import { MDXProvider } from '@mdx-js/react';
 import { MDXRenderer } from 'gatsby-plugin-mdx';
-import jsYAML from 'js-yaml';
 import url from 'url';
 
 import Layout from '../components/Layout';
-import Release from '../components/ReleaseNotes/Release';
 import GithubIcon from '../images/github-icon.inline.svg';
-import { components } from '../components/Markdown';
+import CodeBlock from '../components/CodeBlock';
+import Link from '../components/Link';
 
 import '@fontsource/inter';
 import './doc-page.less';
 
+const mdxComponents = {
+  // Override default markdown output.
+  'pre': CodeBlock,
+  'a': Link,
+  img({ children, ...props}) {
+    if (props.src.indexOf('//') < 0) {
+      props.src = '../'+props.src
+    }
+    // eslint-disable-next-line
+    return {children};
+  },
+
+  // Add new custom components.
+  // (none right now)
+};
+
 // Given a content string and a dict of variables, expand $variables$ in the string.
 //
 // https://github.com/gatsbyjs/gatsby/issues/10174#issuecomment-442513501
 const template = (content, vars) => {
+  if (content === null || vars === null) {
+    return '';
+  }
   return content.replace(/\$(\S+)\$/g, (match, key) => {
     const value = vars[key];
     if (typeof value !== 'undefined') {
@@ -49,18 +67,12 @@ const MarkdownContent = ({
   mdxNode,
   variables,
   siteTitle,
-  maybeShowReadingTime,
 }) => {
   const title = mdxNode.frontmatter.title || mdxNode.headings[0]?.value || "Docs";
   const description = mdxNode.frontmatter.description || mdxNode.excerpt;
-  const readingTime = mdxNode.frontmatter.reading_time ||
-    mdxNode.fields.readingTime.text;
-
-  const showReadingTime = maybeShowReadingTime &&
-    !mdxNode.frontmatter.frontmatter.hide_reading_time;
 
   return (
     <>
@@ -69,8 +81,7 @@
-        {showReadingTime ? {readingTime} : ''}
-
+
         {template(mdxNode.body, variables)}
@@ -79,63 +90,47 @@
   );
 };
 
-const ReleaseNotesContent = ({
-  fileNode,
-  variables,
-  siteTitle,
-}) => {
-  const content = jsYAML.safeLoad(template(fileNode.internal.content, variables))
-
-  return (
-    <>
-
-        {content.docTitle} | {siteTitle}
-
-
-
-

{content.docTitle}

- { - content.items.map((release) => ( - - )) - } - { - content.changelog && -

For a detailed list of all the changes in past releases, please - consult the CHANGELOG.

- } - - ); +const handleVersionChange = (event) => { + if (event.target.value) { + navigate(event.target.value); + } }; -export default function DocPage({ location, data, pageContext }) { - const variables = jsYAML.safeLoad(data.variablesFile.internal.content); +const DocPage = props => { + const { location, data, pageContext } = props; + const { docinfo, variables, sidebar } = pageContext; return ( - +
-
-
+ +
+
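For reference, the template() helper added to doc-page.js above expands $name$ placeholders in the MDX body using the per-version variables passed through pageContext. Below is a small sketch of that expansion; the behaviour for unknown keys is an assumption, since the hunk is truncated here, and the example values are invented.

```ts
// Sketch of the $variable$ expansion performed by template() in doc-page.js.
const template = (content: string, vars: Record<string, string>): string => {
  if (content === null || vars === null) {
    return '';
  }
  return content.replace(/\$(\S+)\$/g, (match, key) => {
    const value = vars[key];
    // Assumption: unknown placeholders are left untouched.
    return typeof value !== 'undefined' ? value : match;
  });
};

// Example (invented values):
//   template('Install Telepresence $version$.', { version: '2.x.y' })
//   => 'Install Telepresence 2.x.y.'
```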