Updates from: 03/26/2022 02:08:25
Service Microsoft Docs article Related commit history on GitHub Change details
active-directory-b2c Language Customization https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory-b2c/language-customization.md
You configure localized resources elements for the content definition and any la
<!--Local account sign-up or sign-in page English--> <Localization Enabled="true"> ...
- <LocalizedResources Id="api.signuporsignin.en">
- <LocalizedStrings>
- <LocalizedString ElementType="UxElement" StringId="logonIdentifier_email">#Email Address</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="requiredField_email">#Please enter your email</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="logonIdentifier_username">#Username</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="password">#Password</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="createaccount_link">#Sign up now</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="requiredField_username">#Please enter your user name</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="createaccount_intro">#Don't have an account?</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="forgotpassword_link">#Forgot your password?</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="divider_title">#OR</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="cancel_message">#The user has forgotten their password</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="button_signin">#Sign in</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="social_intro">#Sign in with your social account</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="requiredField_password">#Please enter your password</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="invalid_password">#The password you entered is not in the expected format.</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="local_intro_username">#Sign in with your user name</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="local_intro_email">#Sign in with your existing account</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="invalid_email">#Please enter a valid email address</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="unknown_error">#We are having trouble signing you in. Please try again later.</LocalizedString>
- <LocalizedString ElementType="UxElement" StringId="email_pattern">^[a-zA-Z0-9.!#$%&amp;'^_`{}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfInvalidPassword">#Your password is incorrect.</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfClaimsPrincipalDoesNotExist">#We can't seem to find your account.</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfOldPasswordUsed">#Looks like you used an old password.</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="DefaultMessage">#Invalid username or password.</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfUserAccountDisabled">#Your account has been locked. Contact your support person to unlock it, then try again.</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfUserAccountLocked">#Your account is temporarily locked to prevent unauthorized use. Try again later.</LocalizedString>
- <LocalizedString ElementType="ErrorMessage" StringId="AADRequestsThrottled">#There are too many requests at this moment. Please wait for some time and try again.</LocalizedString>
- </LocalizedStrings>
- </LocalizedResources>
+ <LocalizedResources Id="api.signuporsignin.en">
+ <LocalizedStrings>
+ <LocalizedString ElementType="ClaimType" ElementId="signInName" StringId="DisplayName">Email Address</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="heading">Sign in</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="social_intro">Sign in with your social account</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="local_intro_generic">Sign in with your {0}</LocalizedString>
+ <LocalizedString ElementType="ClaimType" ElementId="password" StringId="DisplayName">Password</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="requiredField_password">Please enter your password</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="requiredField_generic">Please enter your {0}</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="invalid_generic">Please enter a valid {0}</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="createaccount_one_link">Sign up now</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="createaccount_two_links">Sign up with {0} or {1}</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="createaccount_three_links">Sign up with {0}, {1}, or {2}</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="forgotpassword_link">Forgot your password?</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="button_signin">Sign in</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="divider_title">OR</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="createaccount_intro">Don't have an account?</LocalizedString>
+ <LocalizedString ElementType="UxElement" StringId="unknown_error">We are having trouble signing you in. Please try again later.</LocalizedString>
+        <!-- Uncomment the remember_me element only if the 'Keep me signed in' option is enabled.
+        <LocalizedString ElementType="UxElement" StringId="remember_me">Keep me signed in</LocalizedString> -->
+ <LocalizedString ElementType="ClaimsProvider" StringId="FacebookExchange">Facebook</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="ResourceOwnerFlowInvalidCredentials">Your password is incorrect.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfInvalidPassword">Your password is incorrect.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfPasswordExpired">Your password has expired.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfClaimsPrincipalDoesNotExist">We can't seem to find your account.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfOldPasswordUsed">Looks like you used an old password.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="DefaultMessage">Invalid username or password.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfUserAccountDisabled">Your account has been locked. Contact your support person to unlock it, then try again.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="UserMessageIfUserAccountLocked">Your account is temporarily locked to prevent unauthorized use. Try again later.</LocalizedString>
+ <LocalizedString ElementType="ErrorMessage" StringId="AADRequestsThrottled">There are too many requests at this moment. Please wait for some time and try again.</LocalizedString>
+ </LocalizedStrings>
+ </LocalizedResources>
<!--Local account sign-up or sign-in page Spanish--> <LocalizedResources Id="api.signuporsignin.es"> <LocalizedStrings>
active-directory Concept Sspr Writeback https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/authentication/concept-sspr-writeback.md
Passwords aren't written back in any of the following situations:
> [!WARNING] > Use of the checkbox "User must change password at next logon" in on-premises AD DS administrative tools like Active Directory Users and Computers or the Active Directory Administrative Center is supported as a preview feature of Azure AD Connect. For more information, see [Implement password hash synchronization with Azure AD Connect sync](../hybrid/how-to-connect-password-hash-synchronization.md).
+> [!NOTE]
+> If a user has the option "Password never expires" set in Active Directory (AD), the force password change flag will not be set in AD. As a result, the user will not be prompted to change their password during the next sign-in, even if the option to force the user to change their password at next logon is selected during an administrator-initiated end-user password reset.
+ ## Next steps To get started with SSPR writeback, complete the following tutorial:
active-directory Concept Continuous Access Evaluation https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/conditional-access/concept-continuous-access-evaluation.md
Previously updated : 02/08/2022 Last updated : 03/25/2022
There are two scenarios that make up continuous access evaluation, critical even
### Critical event evaluation
-Continuous access evaluation is implemented by enabling services, like Exchange Online, SharePoint Online, and Teams, to subscribe to critical Azure AD events. Those events can then be evaluated and enforced near real time. Critical event evaluation doesn't rely on Conditional Access policies so it is available in any tenant. The following events are currently evaluated:
+Continuous access evaluation is implemented by enabling services, like Exchange Online, SharePoint Online, and Teams, to subscribe to critical Azure AD events. Those events can then be evaluated and enforced near real time. Critical event evaluation doesn't rely on Conditional Access policies so it's available in any tenant. The following events are currently evaluated:
- User Account is deleted or disabled - Password for a user is changed or reset
CAE only has insight into [IP-based named locations](../conditional-access/locat
> [!IMPORTANT] > If you want your location policies to be enforced in real time by continuous access evaluation, use only the [IP based Conditional Access location condition](../conditional-access/location-condition.md) and configure all IP addresses, **including both IPv4 and IPv6**, that can be seen by your identity provider and resources provider. Do not use country location conditions or the trusted ips feature that is available in Azure AD Multi-Factor Authentication's service settings page.
+### Named location limitations
+
+When the sum of all IP ranges specified in location policies that will be enforced on the resource provider exceeds 5,000, the user location change flow isn't enforced. In this case, Azure AD issues a one-hour CAE token and doesn't enforce client location changes. Security is still improved compared to traditional one-hour tokens, because the [other events](#critical-event-evaluation) besides client location change events continue to be evaluated.
+ ### Office and Web Account Manager settings | Office update channel | DisableADALatopWAMOverride | DisableAADWAM |
active-directory Quickstart Configure App Expose Web Apis https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/develop/quickstart-configure-app-expose-web-apis.md
Previously updated : 09/03/2020 Last updated : 03/25/2022
The code in a client application requests permission to perform operations defin
First, follow these steps to create an example scope named `Employees.Read.All`: 1. Sign in to the <a href="https://portal.azure.com/" target="_blank">Azure portal</a>.
-1. If you have access to multiple tenants, use the **Directory + subscription** filter :::image type="icon" source="./media/quickstart-configure-app-expose-web-apis/portal-01-directory-subscription-filter.png" border="false"::: in the top menu to select the tenant containing your client app's registration.
+1. If you have access to multiple tenants, use the **Directories + subscriptions** filter :::image type="icon" source="./media/quickstart-configure-app-expose-web-apis/portal-01-directory-subscription-filter.png" border="false"::: in the top menu to select the tenant containing your client app's registration.
1. Select **Azure Active Directory** > **App registrations**, and then select your API's app registration. 1. Select **Expose an API** 1. Select **Set** next to **Application ID URI** if you haven't yet configured one.
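The quickstart notes above that client code requests permission to perform the operations the scope defines. As a minimal sketch (not part of the quickstart), a single-page client could request the example `Employees.Read.All` scope with MSAL.js; the `api://<api-client-id>` Application ID URI form and the placeholder IDs are assumptions:

```typescript
// Requests an access token for the example scope exposed by the web API.
// Assumes the API kept the default Application ID URI (api://<api-client-id>).
import { PublicClientApplication } from '@azure/msal-browser';

const client = new PublicClientApplication({
  auth: { clientId: '<client-app-id>' }, // the *client* app registration, not the API's
});

async function getEmployeesToken(): Promise<string> {
  const result = await client.acquireTokenPopup({
    scopes: ['api://<api-client-id>/Employees.Read.All'], // scope created under "Expose an API"
  });
  return result.accessToken;
}
```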
active-directory Tutorial V2 Angular Auth Code https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/develop/tutorial-v2-angular-auth-code.md
Previously updated : 04/14/2021 Last updated : 03/25/2022
Register your **Redirect URI** value as **http://localhost:4200/** and type as '
AppRoutingModule, MsalModule.forRoot( new PublicClientApplication({ auth: {
- clientId: 'Enter_the_Application_Id_here', // This is your client ID
- authority: 'Enter_the_Cloud_Instance_Id_Here'/'Enter_the_Tenant_Info_Here', // This is your tenant ID
+ clientId: 'Enter_the_Application_Id_here', // Application (client) ID from the app registration
+ authority: 'Enter_the_Cloud_Instance_Id_Here/Enter_the_Tenant_Info_Here', // The Azure cloud instance and the app's sign-in audience (tenant ID, common, organizations, or consumers)
redirectUri: 'Enter_the_Redirect_Uri_Here'// This is your redirect URI }, cache: {
Register your **Redirect URI** value as **http://localhost:4200/** and type as '
MsalModule.forRoot( new PublicClientApplication({ auth: { clientId: 'Enter_the_Application_Id_here',
- authority: 'Enter_the_Cloud_Instance_Id_Here'/'Enter_the_Tenant_Info_Here',
+ authority: 'Enter_the_Cloud_Instance_Id_Here/Enter_the_Tenant_Info_Here',
redirectUri: 'Enter_the_Redirect_Uri_Here' }, cache: {
Add the code from the following sections to invoke login using a pop-up window o
MsalModule.forRoot( new PublicClientApplication({ auth: { clientId: 'Enter_the_Application_Id_here',
- authority: 'Enter_the_Cloud_Instance_Id_Here'/'Enter_the_Tenant_Info_Here',
+ authority: 'Enter_the_Cloud_Instance_Id_Here/Enter_the_Tenant_Info_Here',
redirectUri: 'Enter_the_Redirect_Uri_Here' }, cache: {
MSAL Angular provides `MsalGuard`, a class you can use to protect routes and req
MsalModule.forRoot( new PublicClientApplication({ auth: { clientId: 'Enter_the_Application_Id_here',
- authority: 'Enter_the_Cloud_Instance_Id_Here'/'Enter_the_Tenant_Info_Here',
+ authority: 'Enter_the_Cloud_Instance_Id_Here/Enter_the_Tenant_Info_Here',
redirectUri: 'Enter_the_Redirect_Uri_Here' }, cache: {
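For reference, here is a minimal sketch of the corrected `auth` configuration with sample values filled in; the Azure public cloud instance (`https://login.microsoftonline.com`), the `common` audience, and the placeholder client ID and redirect URI are assumptions, not values from the tutorial:

```typescript
// A filled-in example of the corrected authority value: a single URL combining
// the cloud instance and the sign-in audience, rather than two quoted strings
// separated by "/". Client ID and redirect URI are placeholders.
import { PublicClientApplication } from '@azure/msal-browser';

const msalInstance = new PublicClientApplication({
  auth: {
    clientId: '11111111-2222-3333-4444-555555555555',      // Application (client) ID
    authority: 'https://login.microsoftonline.com/common', // Cloud instance + sign-in audience
    redirectUri: 'http://localhost:4200/',                 // Registered redirect URI
  },
  cache: {
    cacheLocation: 'localStorage',
  },
});
```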
active-directory Workload Identity Federation https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/develop/workload-identity-federation.md
The workflow for exchanging an external token for an access token is the same, h
1. When the checks are satisfied, Microsoft identity platform issues an access token to the external workload. 1. The external workload accesses Azure AD protected resources using the access token from Microsoft identity platform. A GitHub Actions workflow, for example, uses the access token to publish a web app to Azure App Service.
+The Microsoft identity platform stores only the first 10 signing keys when they're downloaded from the external IdP's OIDC endpoint. If the external IdP exposes more than 10 signing keys, you may experience errors when using Workload Identity Federation.
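As a rough illustration of the token-exchange workflow above, here is a minimal sketch (not from the article) of the client credentials request in which the externally issued token is presented as the client assertion; the tenant ID, client ID, and Microsoft Graph scope are placeholders and assumptions:

```typescript
// Exchanges a token from a trusted external IdP (for example, GitHub Actions)
// for a Microsoft identity platform access token using the client credentials
// grant with a federated credential.
async function exchangeFederatedToken(
  tenantId: string,
  clientId: string,
  externalToken: string, // token issued by the trusted external IdP
): Promise<string> {
  const body = new URLSearchParams({
    client_id: clientId,
    grant_type: 'client_credentials',
    scope: 'https://graph.microsoft.com/.default',
    client_assertion_type: 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
    client_assertion: externalToken, // presented instead of a client secret or certificate
  });

  const response = await fetch(
    `https://login.microsoftonline.com/${tenantId}/oauth2/v2.0/token`,
    { method: 'POST', body },
  );
  const json = await response.json();
  return json.access_token; // access token for Azure AD protected resources
}
```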
+ ## Next steps Learn more about how workload identity federation works: - How Azure AD uses the [OAuth 2.0 client credentials grant](v2-oauth2-client-creds-grant-flow.md#third-case-access-token-request-with-a-federated-credential) and a client assertion issued by another IdP to get a token.
active-directory Licensing Service Plan Reference https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/enterprise-users/licensing-service-plan-reference.md
Previously updated : 03/10/2022 Last updated : 03/24/2022
When managing licenses in [the Azure portal](https://portal.azure.com/#blade/Mic
- **Service plans included (friendly names)**: A list of service plans (friendly names) in the product that correspond to the string ID and GUID >[!NOTE]
->This information last updated on March 10th, 2022.<br/>You can also download a CSV version of this table [here](https://download.microsoft.com/download/e/3/e/e3e9faf2-f28b-490a-9ada-c6089a1fc5b0/Product%20names%20and%20service%20plan%20identifiers%20for%20licensing.csv).
+>This information last updated on March 23rd, 2022.<br/>You can also download a CSV version of this table [here](https://download.microsoft.com/download/e/3/e/e3e9faf2-f28b-490a-9ada-c6089a1fc5b0/Product%20names%20and%20service%20plan%20identifiers%20for%20licensing.csv).
><br/> | Product name | String ID | GUID | Service plans included | Service plans included (friendly names) |
When managing licenses in [the Azure portal](https://portal.azure.com/#blade/Mic
| Microsoft 365 E3_USGOV_DOD | SPE_E3_USGOV_DOD | d61d61cc-f992-433f-a577-5bd016037eeb | AAD_PREMIUM (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>RMS_S_PREMIUM (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>EXCHANGE_S_ENTERPRISE (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>RMS_S_ENTERPRISE (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>MFA_PREMIUM (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>INTUNE_A (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>STREAM_O365_E3 (9e700747-8b1d-45e5-ab8d-ef187ceec156)<br/>TEAMS_AR_DOD (fd500458-c24c-478e-856c-a6067a8376cd)<br/>OFFICESUBSCRIPTION (43de0ff5-c92c-492b-9116-175376d08c38)<br/>SHAREPOINTWAC (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>SHAREPOINTENTERPRISE (5dbe027f-2339-4123-9542-606e4d348a72)<br/>MCOSTANDARD (0feaeb32-d00e-4d66-bd5a-43b5b83db82c) | Azure Active Directory Premium P1 (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>Azure Information Protection Premium P1 (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>Exchange Online (Plan 2) (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>Microsoft Azure Active Directory Rights (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>Microsoft Azure Multi-Factor Authentication (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>Microsoft Intune (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>Microsoft Stream for O365 E3 SKU (9e700747-8b1d-45e5-ab8d-ef187ceec156)<br/>Microsoft Teams for DOD (AR) (fd500458-c24c-478e-856c-a6067a8376cd)<br/>Office 365 ProPlus (43de0ff5-c92c-492b-9116-175376d08c38)<br/>Office Online (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>SharePoint Online (Plan 2) (5dbe027f-2339-4123-9542-606e4d348a72)<br/>Skype for Business Online (Plan 2) (0feaeb32-d00e-4d66-bd5a-43b5b83db82c) | | Microsoft 365 E3_USGOV_GCCHIGH | SPE_E3_USGOV_GCCHIGH | ca9d1dd9-dfe9-4fef-b97c-9bc1ea3c3658 | AAD_PREMIUM (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>RMS_S_PREMIUM (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>ADALLOM_S_DISCOVERY (932ad362-64a8-4783-9106-97849a1a30b9)<br/>EXCHANGE_S_ENTERPRISE (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>RMS_S_ENTERPRISE (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>MFA_PREMIUM (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>INTUNE_A (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>PROJECTWORKMANAGEMENT (b737dad2-2f6c-4c65-90e3-ca563267e8b9)<br/>STREAM_O365_E3 (9e700747-8b1d-45e5-ab8d-ef187ceec156)<br/>TEAMS_AR_GCCHIGH (9953b155-8aef-4c56-92f3-72b0487fce41)<br/>OFFICESUBSCRIPTION (43de0ff5-c92c-492b-9116-175376d08c38)<br/>SHAREPOINTWAC (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>SHAREPOINTENTERPRISE (5dbe027f-2339-4123-9542-606e4d348a72)<br/>MCOSTANDARD (0feaeb32-d00e-4d66-bd5a-43b5b83db82c) | Azure Active Directory Premium P1 (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>Azure Information Protection Premium P1(6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>Cloud App Security Discovery (932ad362-64a8-4783-9106-97849a1a30b9)<br/>Exchange Online (Plan 2) (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>Microsoft Azure Active Directory Rights (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>Microsoft Azure Multi-Factor Authentication (8a256a2b-b617-496d-b51b-e76466e88db0)<br/> Microsoft Intune (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/> Microsoft Planner (b737dad2-2f6c-4c65-90e3-ca563267e8b9)<br/> Microsoft Stream for O365 E3 SKU (9e700747-8b1d-45e5-ab8d-ef187ceec156)<br/> Microsoft Teams for GCCHigh (AR) (9953b155-8aef-4c56-92f3-72b0487fce41)<br/> Office 365 ProPlus (43de0ff5-c92c-492b-9116-175376d08c38)<br/> Office Online (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/> SharePoint Online (Plan 2) (5dbe027f-2339-4123-9542-606e4d348a72)<br/>Skype for Business Online (Plan 2) 
(0feaeb32-d00e-4d66-bd5a-43b5b83db82c) | | Microsoft 365 E5 | SPE_E5 | 06ebc4ee-1bb5-47dd-8120-11324bc54e06 | MCOMEETADV (3e26ee1f-8a5f-4d52-aee2-b81ce45c8f40)<br/>AAD_PREMIUM (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>AAD_PREMIUM_P2 (eec0eb4f-6444-4f95-aba0-50c24d67f998)<br/>ATA (14ab5db5-e6c4-4b20-b4bc-13e36fd2227f)<br/>RMS_S_PREMIUM (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>RMS_S_PREMIUM2 (5689bec4-755d-4753-8b61-40975025187c)<br/>LOCKBOX_ENTERPRISE (9f431833-0334-42de-a7dc-70aa40db46db)<br/>EXCHANGE_S_ENTERPRISE (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>FLOW_O365_P3 (07699545-9485-468e-95b6-2fca3738be01)<br/>INFORMATION_BARRIERS (c4801e8a-cb58-4c35-aca6-f2dcc106f287)<br/>MIP_S_CLP2 (efb0351d-3b08-4503-993d-383af8de41e3)<br/>MIP_S_CLP1 (5136a095-5cf0-4aff-bec3-e84448b38ea5)<br/>MYANALYTICS_P2 (33c4f319-9bdd-48d6-9c4d-410b750a4a5a)<br/>RMS_S_ENTERPRISE (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>MFA_PREMIUM (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>ADALLOM_S_STANDALONE (2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>WINDEFATP (871d91ec-ec1a-452b-a83f-bd76c7d770ef)<br/>FORMS_PLAN_E5 (e212cbc7-0961-4c40-9825-01117710dcb1)<br/>INTUNE_A (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>KAIZALA_STANDALONE (0898bdbb-73b0-471a-81e5-20f1fe4dd66e)<br/>EXCHANGE_ANALYTICS (34c0d7a0-a70f-4668-9238-47f9fc208882)<br/>PROJECTWORKMANAGEMENT (b737dad2-2f6c-4c65-90e3-ca563267e8b9)<br/>MICROSOFT_SEARCH (94065c59-bc8e-4e8b-89e5-5138d471eaff)<br/>Deskless (8c7d2df8-86f0-4902-b2ed-a0458298f3b3)<br/>STREAM_O365_E5 (6c6042f5-6f01-4d67-b8c1-eb99d36eed3e)<br/>TEAMS1 (57ff2da0-773e-42df-b2af-ffb7a2317929)<br/>INTUNE_O365 (882e1d05-acd1-4ccb-8708-6ee03664b117)<br/>EQUIVIO_ANALYTICS (4de31727-a228-4ec3-a5bf-8e45b5ca48cc)<br/>ADALLOM_S_O365 (8c098270-9dd4-4350-9b30-ba4703f3b36b)<br/>ATP_ENTERPRISE (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>THREAT_INTELLIGENCE (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>PAM_ENTERPRISE (b1188c4c-1b36-4018-b48b-ee07604f6feb)<br/>OFFICESUBSCRIPTION (43de0ff5-c92c-492b-9116-175376d08c38)<br/>SHAREPOINTWAC (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>MCOEV (4828c8ec-dc2e-4779-b502-87ac9ce28ab7)<br/>BI_AZURE_P2 (70d33638-9c74-4d01-bfd3-562de28bd4ba)<br/>POWERAPPS_O365_P3 (9c0dab89-a30c-4117-86e7-97bda240acd2)<br/>PREMIUM_ENCRYPTION (617b097b-4b93-4ede-83de-5f075bb5fb2f)<br/>SHAREPOINTENTERPRISE (5dbe027f-2339-4123-9542-606e4d348a72)<br/>MCOSTANDARD (0feaeb32-d00e-4d66-bd5a-43b5b83db82c)<br/>SWAY (a23b959c-7ce8-4e57-9140-b90eb88a9e97)<br/>BPOS_S_TODO_3 (3fb82609-8c27-4f7b-bd51-30634711ee67)<br/>WHITEBOARD_PLAN3 (4a51bca5-1eff-43f5-878c-177680f191af)<br/>WIN10_PRO_ENT_SUB (21b439ba-a0ca-424f-a6cc-52f954a5b111)<br/>YAMMER_ENTERPRISE (7547a3fe-08ee-4ccb-b430-5077c5041653) | Audio Conferencing (3e26ee1f-8a5f-4d52-aee2-b81ce45c8f40)<br/>Azure Active Directory Premium P1 (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>Azure Active Directory Premium P2 (eec0eb4f-6444-4f95-aba0-50c24d67f998)<br/>Azure Advanced Threat Protection (14ab5db5-e6c4-4b20-b4bc-13e36fd2227f)<br/>Azure Information Protection Premium P1 (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>Azure Information Protection Premium P2 (5689bec4-755d-4753-8b61-40975025187c)<br/>Customer Lockbox (9f431833-0334-42de-a7dc-70aa40db46db)<br/>Exchange Online (Plan 2) (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>Flow for Office 365 (07699545-9485-468e-95b6-2fca3738be01)<br/>Information Barriers (c4801e8a-cb58-4c35-aca6-f2dcc106f287)<br/>Information Protection for Office 365 - Premium (efb0351d-3b08-4503-993d-383af8de41e3)<br/>Information Protection for Office 365 - 
Standard (5136a095-5cf0-4aff-bec3-e84448b38ea5)<br/>Insights by MyAnalytics (33c4f319-9bdd-48d6-9c4d-410b750a4a5a)<br/>Microsoft Azure Active Directory Rights (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>Microsoft Azure Multi-Factor Authentication (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>Microsoft Cloud App Security (2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>MICROSOFT DEFENDER FOR ENDPOINT (871d91ec-ec1a-452b-a83f-bd76c7d770ef)<br/>Microsoft Forms (Plan E5) (e212cbc7-0961-4c40-9825-01117710dcb1)<br/>Microsoft Intune (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>Microsoft Kaizala (0898bdbb-73b0-471a-81e5-20f1fe4dd66e)<br/>Microsoft MyAnalytics (Full) (34c0d7a0-a70f-4668-9238-47f9fc208882)<br/>Microsoft Planner (b737dad2-2f6c-4c65-90e3-ca563267e8b9)<br/>Microsoft Search (94065c59-bc8e-4e8b-89e5-5138d471eaff)<br/>Microsoft StaffHub (8c7d2df8-86f0-4902-b2ed-a0458298f3b3)<br/>Microsoft Stream for O365 E5 SKU (6c6042f5-6f01-4d67-b8c1-eb99d36eed3e)<br/>Microsoft Teams (57ff2da0-773e-42df-b2af-ffb7a2317929)<br/>Mobile Device Management for Office 365 (882e1d05-acd1-4ccb-8708-6ee03664b117)<br/>Office 365 Advanced eDiscovery (4de31727-a228-4ec3-a5bf-8e45b5ca48cc)<br/>Office 365 Advanced Security Management (8c098270-9dd4-4350-9b30-ba4703f3b36b)<br/>Microsoft Defender for Office 365 (Plan 1) (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>Microsoft Defender for Office 365 (Plan 2) (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>Office 365 Privileged Access Management (b1188c4c-1b36-4018-b48b-ee07604f6feb)<br/>Office 365 ProPlus (43de0ff5-c92c-492b-9116-175376d08c38)<br/>Office Online (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>Phone System (4828c8ec-dc2e-4779-b502-87ac9ce28ab7)<br/>Power BI Pro (70d33638-9c74-4d01-bfd3-562de28bd4ba)<br/>PowerApps for Office 365 Plan 3 (9c0dab89-a30c-4117-86e7-97bda240acd2)<br/>Premium Encryption in Office 365 (617b097b-4b93-4ede-83de-5f075bb5fb2f)<br/>SharePoint Online (Plan 2) (5dbe027f-2339-4123-9542-606e4d348a72)<br/>Skype for Business Online (Plan 2) (0feaeb32-d00e-4d66-bd5a-43b5b83db82c)<br/>Sway (a23b959c-7ce8-4e57-9140-b90eb88a9e97)<br/>To-Do (Plan 3) (3fb82609-8c27-4f7b-bd51-30634711ee67)<br/>Whiteboard (Plan 3) (4a51bca5-1eff-43f5-878c-177680f191af)<br/>Windows 10 Enterprise (Original) (21b439ba-a0ca-424f-a6cc-52f954a5b111)<br/>Yammer Enterprise (7547a3fe-08ee-4ccb-b430-5077c5041653) |
+| Microsoft 365 E5 Developer (without Windows and Audio Conferencing) | DEVELOPERPACK_E5 | c42b9cae-ea4f-4ab7-9717-81576235ccac | RMS_S_ENTERPRISE (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>CDS_O365_P3 (afa73018-811e-46e9-988f-f75d2b1b8430)<br/>LOCKBOX_ENTERPRISE (9f431833-0334-42de-a7dc-70aa40db46db)<br/>MIP_S_Exchange (cd31b152-6326-4d1b-ae1b-997b625182e6)<br/>EXCHANGE_S_ENTERPRISE (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>GRAPH_CONNECTORS_SEARCH_INDEX (a6520331-d7d4-4276-95f5-15c0933bc757)<br/>Content_Explorer (d9fa6af4-e046-4c89-9226-729a0786685d)<br/>MIP_S_CLP2 (efb0351d-3b08-4503-993d-383af8de41e3)<br/>MIP_S_CLP1 (5136a095-5cf0-4aff-bec3-e84448b38ea5)<br/>M365_ADVANCED_AUDITING (2f442157-a11c-46b9-ae5b-6e39ff4e5849)<br/>OFFICESUBSCRIPTION (43de0ff5-c92c-492b-9116-175376d08c38)<br/>MICROSOFT_COMMUNICATION_COMPLIANCE (a413a9ff-720c-4822-98ef-2f37c2a21f4c)<br/>MTP (bf28f719-7844-4079-9c78-c1307898e192)<br/>MCOEV (4828c8ec-dc2e-4779-b502-87ac9ce28ab7)<br/>MICROSOFTBOOKINGS (199a5c09-e0ca-4e37-8f7c-b05d533e1ea2)<br/>COMMUNICATIONS_DLP (6dc145d6-95dd-4191-b9c3-185575ee6f6b)<br/>CUSTOMER_KEY (6db1f1db-2b46-403f-be40-e39395f08dbb)<br/>DATA_INVESTIGATIONS (46129a58-a698-46f0-aa5b-17f6586297d9)<br/>ATP_ENTERPRISE (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>THREAT_INTELLIGENCE (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>EXCEL_PREMIUM (531ee2f8-b1cb-453b-9c21-d2180d014ca5)<br/>FORMS_PLAN_E5 (e212cbc7-0961-4c40-9825-01117710dcb1)<br/>INFO_GOVERNANCE (e26c2fcc-ab91-4a61-b35c-03cdc8dddf66)<br/>INSIDER_RISK (d587c7a3-bda9-4f99-8776-9bcf59c84f75)<br/>ML_CLASSIFICATION (d2d51368-76c9-4317-ada2-a12c004c432f)<br/>EXCHANGE_ANALYTICS (34c0d7a0-a70f-4668-9238-47f9fc208882)<br/>PROJECTWORKMANAGEMENT (b737dad2-2f6c-4c65-90e3-ca563267e8b9)<br/>RECORDS_MANAGEMENT (65cc641f-cccd-4643-97e0-a17e3045e541)<br/>MICROSOFT_SEARCH (94065c59-bc8e-4e8b-89e5-5138d471eaff)<br/>Deskless (8c7d2df8-86f0-4902-b2ed-a0458298f3b3)<br/>STREAM_O365_E5 (6c6042f5-6f01-4d67-b8c1-eb99d36eed3e)<br/>TEAMS1 (57ff2da0-773e-42df-b2af-ffb7a2317929)<br/>INTUNE_O365 (882e1d05-acd1-4ccb-8708-6ee03664b117)<br/>Nucleus (db4d623d-b514-490b-b7ef-8885eee514de)<br/>EQUIVIO_ANALYTICS (4de31727-a228-4ec3-a5bf-8e45b5ca48cc)<br/>ADALLOM_S_O365 (8c098270-9dd4-4350-9b30-ba4703f3b36b)<br/>PAM_ENTERPRISE (b1188c4c-1b36-4018-b48b-ee07604f6feb)<br/>SAFEDOCS (bf6f5520-59e3-4f82-974b-7dbbc4fd27c7)<br/>SHAREPOINTWAC (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>POWERAPPS_O365_P3 (9c0dab89-a30c-4117-86e7-97bda240acd2)<br/>BI_AZURE_P2 (70d33638-9c74-4d01-bfd3-562de28bd4ba)<br/>PROJECT_O365_P3 (b21a6b06-1988-436e-a07b-51ec6d9f52ad)<br/>COMMUNICATIONS_COMPLIANCE (41fcdd7d-4733-4863-9cf4-c65b83ce2df4)<br/>INSIDER_RISK_MANAGEMENT (9d0c4ee5-e4a1-4625-ab39-d82b619b1a34)<br/>SHAREPOINTENTERPRISE (5dbe027f-2339-4123-9542-606e4d348a72)<br/>MCOSTANDARD (0feaeb32-d00e-4d66-bd5a-43b5b83db82c)<br/>SWAY (a23b959c-7ce8-4e57-9140-b90eb88a9e97)<br/>BPOS_S_TODO_3 (3fb82609-8c27-4f7b-bd51-30634711ee67)<br/>VIVA_LEARNING_SEEDED (b76fb638-6ba6-402a-b9f9-83d28acb3d86)<br/>WHITEBOARD_PLAN3 (4a51bca5-1eff-43f5-878c-177680f191af)<br/>YAMMER_ENTERPRISE (7547a3fe-08ee-4ccb-b430-5077c5041653)<br/>AAD_PREMIUM (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>AAD_PREMIUM_P2 (eec0eb4f-6444-4f95-aba0-50c24d67f998)<br/>RMS_S_PREMIUM (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>RMS_S_PREMIUM2 (5689bec4-755d-4753-8b61-40975025187c)<br/>DYN365_CDS_O365_P3 (28b0fa46-c39a-4188-89e2-58e979a6b014)<br/>MFA_PREMIUM (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>ADALLOM_S_STANDALONE 
(2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>ATA (14ab5db5-e6c4-4b20-b4bc-13e36fd2227f)<br/>INTUNE_A (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>FLOW_O365_P3 (07699545-9485-468e-95b6-2fca3738be01)<br/>POWER_VIRTUAL_AGENTS_O365_P3 (ded3d325-1bdc-453e-8432-5bac26d7a014) | Azure Rights Management (bea4c11e-220a-4e6d-8eb8-8ea15d019f90)<br/>Common Data Service for Teams_P3 (afa73018-811e-46e9-988f-f75d2b1b8430)<br/>Customer Lockbox (9f431833-0334-42de-a7dc-70aa40db46db)<br/>Data Classification in Microsoft 365 (cd31b152-6326-4d1b-ae1b-997b625182e6)<br/>Exchange Online (Plan 2) (efb87545-963c-4e0d-99df-69c6916d9eb0)<br/>Graph Connectors Search with Index (a6520331-d7d4-4276-95f5-15c0933bc757)<br/>Information Protection and Governance Analytics - Premium (d9fa6af4-e046-4c89-9226-729a0786685d)<br/>Information Protection for Office 365 - Premium (efb0351d-3b08-4503-993d-383af8de41e3)<br/>Information Protection for Office 365 - Standard (5136a095-5cf0-4aff-bec3-e84448b38ea5)<br/>Microsoft 365 Advanced Auditing (2f442157-a11c-46b9-ae5b-6e39ff4e5849)<br/>Microsoft 365 Apps for Enterprise (43de0ff5-c92c-492b-9116-175376d08c38)<br/>Microsoft 365 Communication Compliance (a413a9ff-720c-4822-98ef-2f37c2a21f4c)<br/>Microsoft 365 Defender (bf28f719-7844-4079-9c78-c1307898e192)<br/>Microsoft 365 Phone System (4828c8ec-dc2e-4779-b502-87ac9ce28ab7)<br/>Microsoft Bookings (199a5c09-e0ca-4e37-8f7c-b05d533e1ea2)<br/>Microsoft Communications DLP (6dc145d6-95dd-4191-b9c3-185575ee6f6b)<br/>Microsoft Customer Key (6db1f1db-2b46-403f-be40-e39395f08dbb)<br/>Microsoft Data Investigations (46129a58-a698-46f0-aa5b-17f6586297d9)<br/>Microsoft Defender for Office 365 (Plan 1) (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>Microsoft Defender for Office 365 (Plan 2) (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>Microsoft Excel Advanced Analytics (531ee2f8-b1cb-453b-9c21-d2180d014ca5)<br/>Microsoft Forms (Plan E5) (e212cbc7-0961-4c40-9825-01117710dcb1)<br/>Microsoft Information Governance (e26c2fcc-ab91-4a61-b35c-03cdc8dddf66)<br/>Microsoft Insider Risk Management (d587c7a3-bda9-4f99-8776-9bcf59c84f75)<br/>Microsoft ML-Based Classification (d2d51368-76c9-4317-ada2-a12c004c432f)<br/>Microsoft MyAnalytics (Full) (34c0d7a0-a70f-4668-9238-47f9fc208882)<br/>Microsoft Planner (b737dad2-2f6c-4c65-90e3-ca563267e8b9)<br/>Microsoft Records Management (65cc641f-cccd-4643-97e0-a17e3045e541)<br/>Microsoft Search (94065c59-bc8e-4e8b-89e5-5138d471eaff)<br/>Microsoft StaffHub (8c7d2df8-86f0-4902-b2ed-a0458298f3b3)<br/>Microsoft Stream for Office 365 E5 (6c6042f5-6f01-4d67-b8c1-eb99d36eed3e)<br/>Microsoft Teams (57ff2da0-773e-42df-b2af-ffb7a2317929)<br/>Mobile Device Management for Office 365 (882e1d05-acd1-4ccb-8708-6ee03664b117)<br/>Nucleus (db4d623d-b514-490b-b7ef-8885eee514de)<br/>Office 365 Advanced eDiscovery (4de31727-a228-4ec3-a5bf-8e45b5ca48cc)<br/>Office 365 Advanced Security Management (8c098270-9dd4-4350-9b30-ba4703f3b36b)<br/>Office 365 Privileged Access Management (b1188c4c-1b36-4018-b48b-ee07604f6feb)<br/>Office 365 SafeDocs (bf6f5520-59e3-4f82-974b-7dbbc4fd27c7)<br/>Office for the Web (e95bec33-7c88-4a70-8e19-b10bd9d0c014)<br/>Power Apps for Office 365 (Plan 3) (9c0dab89-a30c-4117-86e7-97bda240acd2)<br/>Power BI Pro (70d33638-9c74-4d01-bfd3-562de28bd4ba)<br/>Project for Office (Plan E5) (b21a6b06-1988-436e-a07b-51ec6d9f52ad)<br/>Microsoft Communications Compliance (41fcdd7d-4733-4863-9cf4-c65b83ce2df4)<br/>Microsoft Insider Risk Management (9d0c4ee5-e4a1-4625-ab39-d82b619b1a34)<br/>SharePoint (Plan 2) 
(5dbe027f-2339-4123-9542-606e4d348a72)<br/>Skype for Business Online (Plan 2) (0feaeb32-d00e-4d66-bd5a-43b5b83db82c)<br/>Sway (a23b959c-7ce8-4e57-9140-b90eb88a9e97)<br/>To-Do (Plan 3) (3fb82609-8c27-4f7b-bd51-30634711ee67)<br/>Viva Learning Seeded (b76fb638-6ba6-402a-b9f9-83d28acb3d86)<br/>Whiteboard (Plan 3) (4a51bca5-1eff-43f5-878c-177680f191af)<br/>Yammer Enterprise (7547a3fe-08ee-4ccb-b430-5077c5041653)<br/>Azure Active Directory Premium P1 (41781fb2-bc02-4b7c-bd55-b576c07bb09d)<br/>Azure Active Directory Premium P2 (eec0eb4f-6444-4f95-aba0-50c24d67f998)<br/>Azure Information Protection Premium P1 (6c57d4b6-3b23-47a5-9bc9-69f17b4947b3)<br/>Azure Information Protection Premium P2 (5689bec4-755d-4753-8b61-40975025187c)<br/>Common Data Service - O365 P3 (28b0fa46-c39a-4188-89e2-58e979a6b014)<br/>Microsoft Azure Multi-Factor Authentication (8a256a2b-b617-496d-b51b-e76466e88db0)<br/>Microsoft Defender for Cloud Apps (2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>Microsoft Defender for Identity (14ab5db5-e6c4-4b20-b4bc-13e36fd2227f)<br/>Microsoft Intune (c1ec4a95-1f05-45b3-a911-aa3fa01094f5)<br/>Power Automate for Office 365 (07699545-9485-468e-95b6-2fca3738be01)<br/>Power Virtual Agents for Office 365 P3 (ded3d325-1bdc-453e-8432-5bac26d7a014) |
| Microsoft 365 E5 Compliance | INFORMATION_PROTECTION_COMPLIANCE | 184efa21-98c3-4e5d-95ab-d07053a96e67 | RMS_S_PREMIUM2 (5689bec4-755d-4753-8b61-40975025187c)<br/>LOCKBOX_ENTERPRISE (9f431833-0334-42de-a7dc-70aa40db46db)<br/>MIP_S_Exchange (cd31b152-6326-4d1b-ae1b-997b625182e6)<br/>INFORMATION_BARRIERS (c4801e8a-cb58-4c35-aca6-f2dcc106f287)<br/>Content_Explorer (d9fa6af4-e046-4c89-9226-729a0786685d)<br/>ContentExplorer_Standard (2b815d45-56e4-4e3a-b65c-66cb9175b560)<br/>MIP_S_CLP2 (efb0351d-3b08-4503-993d-383af8de41e3)<br/>M365_ADVANCED_AUDITING (2f442157-a11c-46b9-ae5b-6e39ff4e5849)<br/>MICROSOFT_COMMUNICATION_COMPLIANCE (a413a9ff-720c-4822-98ef-2f37c2a21f4c)<br/>ADALLOM_S_STANDALONE (2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>COMMUNICATIONS_DLP (6dc145d6-95dd-4191-b9c3-185575ee6f6b)<br/>CUSTOMER_KEY (6db1f1db-2b46-403f-be40-e39395f08dbb)<br/>DATA_INVESTIGATIONS (46129a58-a698-46f0-aa5b-17f6586297d9)<br/>MICROSOFTENDPOINTDLP (64bfac92-2b17-4482-b5e5-a0304429de3e)<br/>INFO_GOVERNANCE (e26c2fcc-ab91-4a61-b35c-03cdc8dddf66)<br/>INSIDER_RISK (d587c7a3-bda9-4f99-8776-9bcf59c84f75)<br/>ML_CLASSIFICATION (d2d51368-76c9-4317-ada2-a12c004c432f)<br/>RECORDS_MANAGEMENT (65cc641f-cccd-4643-97e0-a17e3045e541)<br/>EQUIVIO_ANALYTICS (4de31727-a228-4ec3-a5bf-8e45b5ca48cc)<br/>PAM_ENTERPRISE (b1188c4c-1b36-4018-b48b-ee07604f6feb)<br/>PREMIUM_ENCRYPTION (617b097b-4b93-4ede-83de-5f075bb5fb2f)<br/>COMMUNICATIONS_COMPLIANCE (41fcdd7d-4733-4863-9cf4-c65b83ce2df4)<br/>INSIDER_RISK_MANAGEMENT (9d0c4ee5-e4a1-4625-ab39-d82b619b1a34) | Azure Information Protection Premium P2 (5689bec4-755d-4753-8b61-40975025187c)<br/>Customer Lockbox (9f431833-0334-42de-a7dc-70aa40db46db)<br/>Data Classification in Microsoft 365 (cd31b152-6326-4d1b-ae1b-997b625182e6)<br/>Information Barriers (c4801e8a-cb58-4c35-aca6-f2dcc106f287)<br/>Information Protection and Governance Analytics ΓÇô Premium (d9fa6af4-e046-4c89-9226-729a0786685d)<br/>Information Protection and Governance Analytics ΓÇô Standard (2b815d45-56e4-4e3a-b65c-66cb9175b560)<br/>Information Protection for Office 365 ΓÇô Premium (efb0351d-3b08-4503-993d-383af8de41e3)<br/>Microsoft 365 Advanced Auditing (2f442157-a11c-46b9-ae5b-6e39ff4e5849)<br/>Microsoft 365 Communication Compliance (a413a9ff-720c-4822-98ef-2f37c2a21f4c)<br/>Microsoft Cloud App Security (2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>Microsoft Communications DLP (6dc145d6-95dd-4191-b9c3-185575ee6f6b)<br/>Microsoft Customer Key (6db1f1db-2b46-403f-be40-e39395f08dbb)<br/>Microsoft Data Investigations (46129a58-a698-46f0-aa5b-17f6586297d9)<br/>Microsoft Endpoint DLP (64bfac92-2b17-4482-b5e5-a0304429de3e)<br/>Microsoft Information Governance (e26c2fcc-ab91-4a61-b35c-03cdc8dddf66)<br/>Microsoft Insider Risk Management (d587c7a3-bda9-4f99-8776-9bcf59c84f75)<br/>Microsoft ML-Based Classification (d2d51368-76c9-4317-ada2-a12c004c432f)<br/>Microsoft Records Management (65cc641f-cccd-4643-97e0-a17e3045e541)<br/>Office 365 Advanced eDiscovery (4de31727-a228-4ec3-a5bf-8e45b5ca48cc)<br/>Office 365 Privileged Access Management (b1188c4c-1b36-4018-b48b-ee07604f6feb)<br/>Premium Encryption in Office 365 (617b097b-4b93-4ede-83de-5f075bb5fb2f)<br/>Microsoft Communications Compliance (41fcdd7d-4733-4863-9cf4-c65b83ce2df4)<br/>Microsoft Insider Risk Management (9d0c4ee5-e4a1-4625-ab39-d82b619b1a34) | | Microsoft 365 E5 Security | IDENTITY_THREAT_PROTECTION | 26124093-3d78-432b-b5dc-48bf992543d5 | AAD_PREMIUM_P2 (eec0eb4f-6444-4f95-aba0-50c24d67f998)<br/>ATA (14ab5db5-e6c4-4b20-b4bc-13e36fd2227f)<br/>ADALLOM_S_STANDALONE 
(2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>WINDEFATP (871d91ec-ec1a-452b-a83f-bd76c7d770ef)<br/>ATP_ENTERPRISE (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>THREAT_INTELLIGENCE (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>SAFEDOCS (bf6f5520-59e3-4f82-974b-7dbbc4fd27c7) | Azure Active Directory Premium P2 (eec0eb4f-6444-4f95-aba0-50c24d67f998)<br/>Azure Advanced Threat Protection (14ab5db5-e6c4-4b20-b4bc-13e36fd2227f)<br/>Microsoft Cloud App Security (2e2ddb96-6af9-4b1d-a3f0-d6ecfd22edb2)<br/>MICROSOFT DEFENDER FOR ENDPOINT (871d91ec-ec1a-452b-a83f-bd76c7d770ef)<br/>Microsoft Defender for Office 365 (Plan 1) (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>Microsoft Defender for Office 365 (Plan 2) (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>Office 365 SafeDocs (bf6f5520-59e3-4f82-974b-7dbbc4fd27c7) | | Microsoft 365 E5 Security for EMS E5 | IDENTITY_THREAT_PROTECTION_FOR_EMS_E5 | 44ac31e7-2999-4304-ad94-c948886741d4 | WINDEFATP (871d91ec-ec1a-452b-a83f-bd76c7d770ef)<br/>ATP_ENTERPRISE (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>THREAT_INTELLIGENCE (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>SAFEDOCS (bf6f5520-59e3-4f82-974b-7dbbc4fd27c7) | MICROSOFT DEFENDER FOR ENDPOINT (871d91ec-ec1a-452b-a83f-bd76c7d770ef)<br/>Microsoft Defender for Office 365 (Plan 1) (f20fedf3-f3c3-43c3-8267-2bfdd51c0939)<br/>Microsoft Defender for Office 365 (Plan 2) (8e0c0a52-6a6c-4d40-8370-dd62790dcd70)<br/>Office 365 SafeDocs (bf6f5520-59e3-4f82-974b-7dbbc4fd27c7) |
active-directory Whats New Archive https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/fundamentals/whats-new-archive.md
Assigning roles to Azure AD groups is now generally available. This feature can
In August 2021, we added the following 46 new applications to our App gallery with Federation support:
-[Siriux Customer Dashboard](https://portal.siriux.tech/login), [STRUXI](https://struxi.app/), [Autodesk Construction Cloud - Meetings](https://acc.autodesk.com/), [Eccentex AppBase for Azure](../saas-apps/eccentex-appbase-for-azure-tutorial.md), [Bookado](https://adminportal.bookado.io/), [FilingRamp](https://app.filingramp.com/login), [BenQ IAM](../saas-apps/benq-iam-tutorial.md), [Rhombus Systems](../saas-apps/rhombus-systems-tutorial.md), [CorporateExperience](../saas-apps/corporateexperience-tutorial.md), [TutorOcean](../saas-apps/tutorocean-tutorial.md), [Bookado Device](https://adminportal.bookado.io/), [HiFives-AD-SSO](https://app.hifives.in/login/azure), [Darzin](https://au.darzin.com/), [Simply Stakeholders](https://au.simplystakeholders.com/), [KACTUS HCM - Smart People](https://kactusspc.digitalware.co/), [Five9 UC Adapter for Microsoft Teams V2](https://uc.five9.net/?vendor=msteams), [Automation Center](https://automationcenter.cognizantgoc.com/portal/boot/signon), [Cirrus Identity Bridge for Azure AD](../saas-apps/cirrus-identity-bridge-for-azure-ad-tutorial.md), [ShiftWizard SAML](../saas-apps/shiftwizard-saml-tutorial.md), [Safesend Returns](https://www.safesendwebsites.com/), [Brushup](../saas-apps/brushup-tutorial.md), [directprint.io Cloud Print Administration](../saas-apps/directprint-io-cloud-print-administration-tutorial.md), [plain-x](https://app.plain-x.com/#/login),[X-point Cloud](../saas-apps/x-point-cloud-tutorial.md), [SmartHub INFER](../saas-apps/smarthub-infer-tutorial.md), [Fresh Relevance](../saas-apps/fresh-relevance-tutorial.md), [FluentPro G.A. Suite](https://gas.fluentpro.com/Account/SSOLogin?provider=Microsoft), [Clockwork Recruiting](../saas-apps/clockwork-recruiting-tutorial.md), [WalkMe SAML2.0](../saas-apps/walkme-saml-tutorial.md), [Sideways 6](https://app.sideways6.com/account/login?ReturnUrl=/), [Kronos Workforce Dimensions](../saas-apps/kronos-workforce-dimensions-tutorial.md), [SysTrack Cloud Edition](https://cloud.lakesidesoftware.com/Cloud/Account/Login), [mailworx Dynamics CRM Connector](https://www.mailworx.info/), [Palo Alto Networks Cloud Identity Engine - Cloud Authentication Service](../saas-apps/palo-alto-networks-cloud-identity-enginecloud-authentication-service-tutorial.md), [Peripass](https://accounts.peripass.app/v1/sso/challenge), [JobDiva](https://www.jobssos.com/index_azad.jsp?SSO=AZURE&ID=1), [Sanebox For Office365](https://sanebox.com/login), [Tulip](../saas-apps/tulip-tutorial.md), [HP Wolf Security](https://bec-pocda37b439.bromium-online.com/gui/), [Genesys Engage cloud Email](https://login.microsoftonline.com/common/oauth2/authorize?prompt=consent&accessType=offline&state=07e035a7-6fb0-4411-afd9-efa46c9602f9&resource=https://graph.microsoft.com/&response_type=code&redirect_uri=https://iwd.api01-westus2.dev.genazure.com/iwd/v3/emails/oauth2/microsoft/callback&client_id=36cd21ab-862f-47c8-abb6-79facad09dda), [Meta Wiki](https://meta.dunkel.eu/), [Palo Alto Networks Cloud Identity Engine Directory Sync](https://directory-sync.us.paloaltonetworks.com/directory?instance=L2qoLVONpBHgdJp1M5K9S08Z7NBXlpi54pW1y3DDu2gQqdwKbyUGA11EgeaDfZ1dGwn397S8eP7EwQW3uyE4XL), [Valarea](https://www.valarea.com/en/download), [LanSchool Air](../saas-apps/lanschool-air-tutorial.md), [Catalyst](https://www.catalyst.org/sso-login/), [Webcargo](../saas-apps/webcargo-tutorial.md)
+[Siriux Customer Dashboard](https://portal.siriux.tech/login), [STRUXI](https://struxi.app/), [Autodesk Construction Cloud - Meetings](https://acc.autodesk.com/), [Eccentex AppBase for Azure](../saas-apps/eccentex-appbase-for-azure-tutorial.md), [Bookado](https://adminportal.bookado.io/), [FilingRamp](https://app.filingramp.com/login), [BenQ IAM](../saas-apps/benq-iam-tutorial.md), [Rhombus Systems](../saas-apps/rhombus-systems-tutorial.md), [CorporateExperience](../saas-apps/corporateexperience-tutorial.md), [TutorOcean](../saas-apps/tutorocean-tutorial.md), [Bookado Device](https://adminportal.bookado.io/), [HiFives-AD-SSO](https://app.hifives.in/login/azure), [Darzin](https://au.darzin.com/), [Simply Stakeholders](https://au.simplystakeholders.com/), [KACTUS HCM - Smart People](https://kactusspc.digitalware.co/), [Five9 UC Adapter for Microsoft Teams V2](https://uc.five9.net/?vendor=msteams), [Automation Center](https://automationcenter.cognizantgoc.com/portal/boot/signon), [Cirrus Identity Bridge for Azure AD](../saas-apps/cirrus-identity-bridge-for-azure-ad-tutorial.md), [ShiftWizard SAML](../saas-apps/shiftwizard-saml-tutorial.md), [Safesend Returns](https://www.safesendwebsites.com/), [Brushup](../saas-apps/brushup-tutorial.md), [directprint.io Cloud Print Administration](../saas-apps/directprint-io-cloud-print-administration-tutorial.md), [plain-x](https://app.plain-x.com/#/login),[X-point Cloud](../saas-apps/x-point-cloud-tutorial.md), [SmartHub INFER](../saas-apps/smarthub-infer-tutorial.md), [Fresh Relevance](../saas-apps/fresh-relevance-tutorial.md), [FluentPro G.A. Suite](https://gas.fluentpro.com/Account/SSOLogin?provider=Microsoft), [Clockwork Recruiting](../saas-apps/clockwork-recruiting-tutorial.md), [WalkMe SAML2.0](../saas-apps/walkme-saml-tutorial.md), [Sideways 6](https://app.sideways6.com/account/login?ReturnUrl=/), [Kronos Workforce Dimensions](../saas-apps/kronos-workforce-dimensions-tutorial.md), [SysTrack Cloud Edition](https://cloud.lakesidesoftware.com/Cloud/Account/Login), [mailworx Dynamics CRM Connector](https://www.mailworx.info/), [Palo Alto Networks Cloud Identity Engine - Cloud Authentication Service](../saas-apps/palo-alto-networks-cloud-identity-enginecloud-authentication-service-tutorial.md), [Peripass](https://accounts.peripass.app/v1/sso/challenge), [JobDiva](https://www.jobssos.com/index_azad.jsp?SSO=AZURE&ID=1), [Sanebox For Office365](https://sanebox.com/login), [Tulip](../saas-apps/tulip-tutorial.md), [HP Wolf Security](https://www.hpwolf.com/), [Genesys Engage cloud Email](https://login.microsoftonline.com/common/oauth2/authorize?prompt=consent&accessType=offline&state=07e035a7-6fb0-4411-afd9-efa46c9602f9&resource=https://graph.microsoft.com/&response_type=code&redirect_uri=https://iwd.api01-westus2.dev.genazure.com/iwd/v3/emails/oauth2/microsoft/callback&client_id=36cd21ab-862f-47c8-abb6-79facad09dda), [Meta Wiki](https://meta.dunkel.eu/), [Palo Alto Networks Cloud Identity Engine Directory Sync](https://directory-sync.us.paloaltonetworks.com/directory?instance=L2qoLVONpBHgdJp1M5K9S08Z7NBXlpi54pW1y3DDu2gQqdwKbyUGA11EgeaDfZ1dGwn397S8eP7EwQW3uyE4XL), [Valarea](https://www.valarea.com/en/download), [LanSchool Air](../saas-apps/lanschool-air-tutorial.md), [Catalyst](https://www.catalyst.org/sso-login/), [Webcargo](../saas-apps/webcargo-tutorial.md)
You can also find the documentation of all the applications here: https://aka.ms/AppsTutorial
active-directory Entitlement Management Access Package Create https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/governance/entitlement-management-access-package-create.md
On the **Basics** tab, you give the access package a name and specify which cata
![Access package - Basics](./media/entitlement-management-access-package-create/basics.png)
- If you are a Global administrator, an Identity Governance administrator, a User administrator, or catalog creator and you would like to create your access package in a new catalog that's not listed, click **Create new catalog**. Enter the Catalog name and description and then click **Create**.
+ If you are a Global administrator, an Identity Governance administrator, a User administrator, or catalog creator and you would like to create your access package in a new catalog that's not listed, click **Create new catalog**. Enter the Catalog name and description and then click **Create**.
- The access package you are creating and any resources included in it will be added to the new catalog. You can also add additional catalog owners later.
+ The access package you are creating and any resources included in it will be added to the new catalog. You can also add additional catalog owners later and add attributes to the resources you put in the catalog. Read [Add resource attributes in the catalog](entitlement-management-catalog-create.md#add-resource-attributes-in-the-catalog) to learn more about how to edit the attributes list for a specific catalog resource and the prerequisite roles.
1. Click **Next**.
active-directory Entitlement Management Access Reviews Create https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/governance/entitlement-management-access-reviews-create.md
This setting determines how often access reviews will occur.
1. Next to **Reviewers**, select **Self-review** if you want users to perform their own access review or select **Specific reviewer(s)** if you want to designate a reviewer. You can also select **Manager** if you want to designate the reviewee's manager to be the reviewer. If you select this option, you need to add a **fallback** to forward the review to in case the manager cannot be found in the system. 1. If you selected **Specific reviewer(s)**, specify which users will do the access review:+ ![Select Add reviewers](./media/entitlement-management-access-reviews/access-reviews-add-reviewer.png) 1. Select **Add reviewers**.
This setting determines how often access reviews will occur.
![Add the fallback reviewers](./media/entitlement-management-access-reviews/access-reviews-select-manager.png)
-1. Click **Review + Create** if you are creating a new access package or **Update** if you are editing an access package, at the bottom of the page.
+1. To configure other advanced access review settings, click **Show advanced access review settings**:
+ 1. If you want to specify what happens to users' access when a reviewer doesn't respond, click **If reviewers don't respond**, and then select one of the following:
+ - **No change** if you don't want a decision made on the users' access.
+ - **Remove access** if you want the users' access removed.
+ - **Take recommendations** if you want a decision to be made based on recommendations from MyAccess.
+
+ ![Add advanced access review settings](./media/entitlement-management-access-reviews/advanced-access-reviews.png)
+
+ 1. If you want to see system recommendations, click **Show reviewer decision helpers**. The system's recommendations are based on the users' activity. The reviewers will see one of the following recommendations:
+ - **approve** the review if the user has signed in at least once during the last 30 days.
+ - **deny** the review if the user hasn't signed in during the last 30 days.
+ 1. If you want the reviewer to share their reasons for their approval decision, click **Require reviewer justification**. Their justification is visible to other reviewers and the requestor.
+
+1. At the bottom of the page, click **Review + Create** (or **Next**) if you are creating a new access package, or click **Update** if you are editing an existing access package.
## View the status of the access review
active-directory Entitlement Management Catalog Create https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/governance/entitlement-management-catalog-create.md
To add resources to a catalog:
These resources can now be included in access packages within the catalog.
-### Add resource attributes (preview) in the catalog
+### Add resource attributes in the catalog
Attributes are required fields that requestors will be asked to answer before they submit their access request. Their answers for these attributes will be shown to approvers and also stamped on the user object in Azure AD.
To require attributes for access requests:
1. Select **Resources** on the left menu, and a list of resources in the catalog appears.
-1. Select the ellipsis next to the resource where you want to add attributes, and then select **Require attributes (Preview)**.
+1. Select the ellipsis next to the resource where you want to add attributes, and then select **Require attributes**.
- ![Screenshot that shows selecting Require attributes (Preview).](./media/entitlement-management-catalog-create/resources-require-attributes.png)
+ ![Screenshot that shows selecting Require attributes.](./media/entitlement-management-catalog-create/resources-require-attributes.png)
1. Select the attribute type:
To require attributes for access requests:
![Screenshot that shows saving the localizations.](./media/entitlement-management-catalog-create/attributes-add-localization.png)
-1. After all attribute information is completed on the **Require attributes (Preview)** page, select **Save**.
+1. After all attribute information is completed on the **Require attributes** page, select **Save**.
### Add a Multi-Geo SharePoint site
active-directory Reference Connect Version History https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/hybrid/reference-connect-version-history.md
ms.assetid: ef2797d7-d440-4a9a-a648-db32ad137494
Previously updated : 1/31/2022 Last updated : 3/24/2022
If you want all the latest features and updates, check this page and install wha
To read more about auto-upgrade, see [Azure AD Connect: Automatic upgrade](how-to-connect-install-automatic-upgrade.md).
+## 2.1.1.0
+
+### Release status
+3/24/2022: Released for download only, not available for auto upgrade
+
+### Bug fixes
+ - Fixed an issue where some sync rule functions were not parsing surrogate pairs properly.
 - Fixed an issue where, under certain circumstances, the sync service would not start due to a model db corruption. You can read more about the model db corruption issue in [this article](https://docs.microsoft.com/troubleshoot/azure/active-directory/resolve-model-database-corruption-sqllocaldb).
+ ## 2.0.91.0 ### Release status
active-directory Add Application Portal Assign Users https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/manage-apps/add-application-portal-assign-users.md
Previously updated : 09/22/2021 Last updated : 03/24/2022
active-directory Add Application Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/manage-apps/add-application-portal.md
Previously updated : 09/22/2021 Last updated : 03/24/2022
active-directory Delete Application Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/manage-apps/delete-application-portal.md
Previously updated : 09/22/2021 Last updated : 03/24/2022
active-directory View Applications Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/manage-apps/view-applications-portal.md
Previously updated : 09/07/2021 Last updated : 03/24/2022
To view the enterprise applications registered in your tenant:
To search for a particular application:
-1. In the **Application Type** menu, select **All applications**, and choose **Apply**.
+1. Select the **Application Type** filter. From the drop-down menu, select **All applications**, and then choose **Apply**.
1. Enter the name of the application you want to find. If the application has been added to your Azure AD tenant, it appears in the search results. For example, you can search for the **Azure AD SAML Toolkit 1** application that is used in the previous quickstarts. 1. Try entering the first few letters of an application name.
To search for a particular application:
Select options according to what you're looking for:
-1. You can view the applications by **Application Type**, **Application Status**, and **Application visibility**.
+1. You can view the applications by **Application Type**, **Application Status**, and **Application visibility**. These three options are the default filters.
1. Under **Application Type**, choose one of these options: - **Enterprise Applications** shows non-Microsoft applications. - **Microsoft Applications** shows Microsoft applications.
Select options according to what you're looking for:
1. Under **Application Status**, choose **Any**, **Disabled**, or **Enabled**. The **Any** option includes both disabled and enabled applications. 1. Under **Application Visibility**, choose **Any**, or **Hidden**. The **Hidden** option shows applications that are in the tenant, but aren't visible to users. 1. After choosing the options you want, select **Apply**.
+1. Select **Add filters** to add more options for filtering the search results. The other filter options are:
+ - **Application ID**
+ - **Created on**
+ - **Assignment required**
+ - **Is App Proxy**
+ - **Owner**
+1. To remove any of the filter options already added, select the **X** icon next to the filter option.
+ ## Clean up resources
active-directory Yellowbox Provisioning Tutorial https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/active-directory/saas-apps/yellowbox-provisioning-tutorial.md
The scenario outlined in this tutorial assumes that you already have the followi
* [An Azure AD tenant](../develop/quickstart-create-new-tenant.md). * A user account in Azure AD with [permission](../roles/permissions-reference.md) to configure provisioning (for example, Application Administrator, Cloud Application administrator, Application Owner, or Global Administrator).
-* A user account in Yellowbox with Admin rights.
+* A Yellowbox-issued JSON Web Token (JWT) for authorization against the SCIM provisioning endpoint.
## Step 1. Plan your provisioning deployment 1. Learn about [how the provisioning service works](../app-provisioning/user-provisioning.md).
The scenario outlined in this tutorial assumes that you already have the followi
1. Determine what data to [map between Azure AD and Yellowbox](../app-provisioning/customize-application-attributes.md). ## Step 2. Configure Yellowbox to support provisioning with Azure AD-
-Contact [Yellowbox Help desk](https://dashboard.yellowbox.app/#/help-desk) to obtain the SCIM Url and corresponding Token.
+* Use `https://australia-southeast1-yellowbox-f4c6e.cloudfunctions.net/scim` as the Tenant URL.
+* If you haven't already been issued a token, obtain your JWT authorization token from Yellowbox by contacting [Yellowbox support](mailto:contact@yellowbox.app).
## Step 3. Add Yellowbox from the Azure AD application gallery
aks Azure Files Csi https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/aks/azure-files-csi.md
Title: Use Container Storage Interface (CSI) drivers for Azure Files on Azure Ku
description: Learn how to use the Container Storage Interface (CSI) drivers for Azure Files in an Azure Kubernetes Service (AKS) cluster. Previously updated : 03/22/2021 Last updated : 03/24/2021
storageclass.storage.k8s.io/my-azurefile created
The Azure Files CSI driver supports creating [snapshots of persistent volumes](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html) and the underlying file shares.
+> [!NOTE]
+> This driver supports snapshot creation only; restoring from a snapshot is not supported by the driver. Snapshots can instead be restored from the Azure portal or the Azure CLI. To find a created snapshot in the Azure portal, go to the storage account, select **File shares**, open the associated file share, and then select **Snapshots**. From there you can select a snapshot and restore it.
+ Create a [volume snapshot class](https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/deploy/example/snapshot/volumesnapshotclass-azurefile.yaml) with the [kubectl apply][kubectl-apply] command: ```console
aks Azure Files Volume https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/aks/azure-files-volume.md
You now have a running pod with an Azure Files share mounted at */mnt/azure*. Yo
## Mount file share as a persistent volume - Mount options
-> The default value for *fileMode* and *dirMode* is *0777* for Kubernetes version 1.15 and above.
+> The default value for *fileMode* and *dirMode* is *0777*.
```yaml apiVersion: v1
spec:
accessModes: - ReadWriteMany storageClassName: ""
+ volumeName: azurefile
resources: requests: storage: 5Gi
kubectl delete pod mypod
kubectl apply -f azure-files-pod.yaml ```
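Since the claim manifest above appears only in fragments, here is a minimal sketch of a complete claim that statically binds to a pre-created persistent volume (the claim name `azurefile` is assumed for illustration; `volumeName` must match the name of the existing persistent volume):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: azurefile            # assumed claim name
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  volumeName: azurefile      # name of the pre-created PersistentVolume to bind to
  resources:
    requests:
      storage: 5Gi
```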
-## Using Azure tags
-
-For more details on using Azure tags, see [Use Azure tags in Azure Kubernetes Service (AKS)][use-tags].
- ## Next steps For Azure File CSI driver parameters, see [CSI driver parameters][CSI driver parameters].
aks Devops Pipeline https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/aks/devops-pipeline.md
Last updated 03/15/2022+ zone_pivot_groups: pipelines-version
aks Use Multiple Node Pools https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/aks/use-multiple-node-pools.md
Only pods that have this toleration applied can be scheduled on nodes in *taintn
### Setting nodepool labels
-You can also add labels to a node pool during node pool creation. Labels set at the node pool are added to each node in the node pool. These [labels are visible in Kubernetes][kubernetes-labels] for handling scheduling rules for nodes.
-
-To create a node pool with a label, use [az aks nodepool add][az-aks-nodepool-add]. Specify the name *labelnp* and use the `--labels` parameter to specify *dept=IT* and *costcenter=9999* for labels.
-
-```azurecli-interactive
-az aks nodepool add \
- --resource-group myResourceGroup \
- --cluster-name myAKSCluster \
- --name labelnp \
- --node-count 1 \
- --labels dept=IT costcenter=9999 \
- --no-wait
-```
-
-> [!NOTE]
-> Labels must be a key/value pair and have a [valid syntax][kubernetes-label-syntax].
-
-The following example output from the [az aks nodepool list][az-aks-nodepool-list] command shows that *labelnp* is *Creating* nodes with the specified *nodeLabels*:
-
-```azurecli
-az aks nodepool list -g myResourceGroup --cluster-name myAKSCluster
-
-```output
-[
- {
- ...
- "count": 1,
- ...
- "name": "labelnp",
- "orchestratorVersion": "1.15.7",
- ...
- "provisioningState": "Creating",
- ...
- "nodeLabels": {
- "dept": "IT",
- "costcenter": "9999"
- },
- ...
- },
- ...
-]
-```
+For more details on using labels with node pools, see [Use labels in an Azure Kubernetes Service (AKS) cluster][use-labels].
### Setting nodepool Azure tags
Use [proximity placement groups][reduce-latency-ppg] to reduce latency for your
[node-image-upgrade]: node-image-upgrade.md [fips]: /azure/compliance/offerings/offering-fips-140-2 [use-tags]: use-tags.md
+[use-labels]: use-labels.md
api-management Configure Custom Domain https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/api-management/configure-custom-domain.md
API Management offers a free, managed TLS certificate for your domain, if you do
* Not supported in the following Azure regions: France South and South Africa West * Currently available only in the Azure cloud * Does not support root domain names (for example, `contoso.com`). Requires a fully qualified name such as `api.contoso.com`.
+* Can only be configured when updating an existing API Management instance, not when creating an instance
## Set a custom domain name - portal
app-service Tutorial Java Spring Cosmosdb https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/app-service/tutorial-java-spring-cosmosdb.md
Open the `pom.xml` file in the `initial/spring-boot-todo` directory and add the
<plugin> <groupId>com.microsoft.azure</groupId> <artifactId>azure-webapp-maven-plugin</artifactId>
- <version>2.2.3</version>
+ <version>2.5.0</version>
<configuration> <schemaVersion>v2</schemaVersion>
bash-3.2$ mvn azure-webapp:deploy
[INFO] Building spring-todo-app 2.0-SNAPSHOT [INFO] [INFO]
-[INFO] azure-webapp-maven-plugin:2.2.3:deploy (default-cli) @ spring-todo-app
+[INFO] azure-webapp-maven-plugin:2.5.0:deploy (default-cli) @ spring-todo-app
Auth Type: AZURE_CLI Default subscription: xxxxxxxxx Username: xxxxxxxxx
availability-zones Az Region https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/availability-zones/az-region.md
description: Learn what services are supported by availability zones and underst
Previously updated : 12/10/2021 Last updated : 03/25/2022
Azure provides the most extensive global footprint of any cloud provider and is
| East US 2 | UK South | | Southeast Asia | | South Central US | West Europe | | East Asia | | US Gov Virginia | Sweden Central | | China North 3 |
-| West US 2 | | | |
+| West US 2 | Switzerland North* | | |
| West US 3 | | | |
+\* To learn more about Availability Zones and available services support in these regions, contact your Microsoft sales or customer
+representative. For the upcoming regions that will support Availability Zones, see [Azure geographies](https://azure.microsoft.com/global-infrastructure/geographies/).
+ For a list of Azure services that support availability zones by Azure region, see the [availability zones documentation](az-overview.md). ## Highly available services
azure-arc Managed Instance High Availability https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-arc/data/managed-instance-high-availability.md
Additional steps are required to restore a database into an availability group.
Add the database backup file into the primary instance container. ```console
- kubectl cp <source file location> <pod name>:var/opt/mssql/data/<file name> -n <namespace name>
+ kubectl cp <source file location> <pod name>:var/opt/mssql/data/<file name> -c <serviceName> -n <namespaceName>
``` Example
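A hedged sketch of that command with hypothetical names (pod `sql01-0`, container `arc-sqlmi`, namespace `arc`, backup file `mydb.bak`):

```console
kubectl cp ./mydb.bak sql01-0:var/opt/mssql/data/mydb.bak -c arc-sqlmi -n arc
```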
azure-cache-for-redis Cache Go Get Started https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-cache-for-redis/cache-go-get-started.md
If you want to skip straight to the code, see the [Go quickstart](https://github
## Prerequisites - Azure subscription - [create one for free](https://azure.microsoft.com/free/)-- [Go](https://golang.org/doc/install) (preferably version 1.13 or above)
+- [Go](https://go.dev/doc/install) (preferably version 1.13 or above)
- [Git](https://git-scm.com/downloads) - An HTTP client such as [curl](https://curl.se/)
func main() {
... ```
-Then, we establish connection with Azure Cache for Redis. We use [tls.Config](https://golang.org/pkg/crypto/tls/#Config)--Azure Cache for Redis only accepts secure connections with [TLS 1.2 as the minimum required version](cache-remove-tls-10-11.md).
+Then, we establish a connection with Azure Cache for Redis. We use [tls.Config](https://go.dev/pkg/crypto/tls/#Config) because Azure Cache for Redis only accepts secure connections with [TLS 1.2 as the minimum required version](cache-remove-tls-10-11.md).
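As a minimal sketch, assuming the go-redis client and hypothetical `REDIS_HOST` and `REDIS_PASSWORD` environment variables, the connection setup might look like this:

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"os"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Azure Cache for Redis only accepts TLS 1.2+ connections (port 6380).
	client := redis.NewClient(&redis.Options{
		Addr:      os.Getenv("REDIS_HOST") + ":6380", // hypothetical env var, e.g. <cache-name>.redis.cache.windows.net
		Password:  os.Getenv("REDIS_PASSWORD"),       // hypothetical env var holding the access key
		TLSConfig: &tls.Config{MinVersion: tls.VersionTLS12},
	})

	// Verify the connection before wiring up the HTTP handlers.
	if err := client.Ping(context.Background()).Err(); err != nil {
		fmt.Println("failed to connect to Azure Cache for Redis:", err)
		os.Exit(1)
	}
	fmt.Println("connected to Azure Cache for Redis")
}
```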
```go ...
if err != nil {
... ```
-If the connection is successful, [HTTP handlers](https://golang.org/pkg/net/http/#HandleFunc) are configured to handle `POST` and `GET` operations and the HTTP server is started.
+If the connection is successful, [HTTP handlers](https://go.dev/pkg/net/http/#HandleFunc) are configured to handle `POST` and `GET` operations and the HTTP server is started.
> [!NOTE] > The [gorilla mux library](https://github.com/gorilla/mux) is used for routing (although it's not strictly necessary; we could have gotten away with using the standard library for this sample application).
azure-functions Create First Function Vs Code Other https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/create-first-function-vs-code-other.md
Before you get started, make sure you have the following requirements in place:
+ The [Azure Functions Core Tools](./functions-run-local.md#v2) version 3.x. Use the `func --version` command to check that it is correctly installed.
-+ [Go](https://golang.org/doc/install), latest version recommended. Use the `go version` command to check your version.
++ [Go](https://go.dev/doc/install), latest version recommended. Use the `go version` command to check your version. # [Rust](#tab/rust)
azure-functions Functions How To Azure Devops https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/functions-how-to-azure-devops.md
Last updated 02/25/2022 -+ ms.devlang: azurecli
azure-functions Functions How To Use Azure Function App Settings https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/functions-how-to-use-azure-function-app-settings.md
Use the following procedure to migrate from a Premium plan to a Consumption plan
1. Run the following command to migrate the existing function app to the new Consumption plan. ```azurecli-interactive
- az functionapp update --name <MY_APP_NAME> --resource-group <MY_RESOURCE_GROUP> --plan <NEW_CONSUMPTION_PLAN>
+ az functionapp update --name <MY_APP_NAME> --resource-group <MY_RESOURCE_GROUP> --plan <NEW_CONSUMPTION_PLAN> --force
``` 1. Delete the function app you created in step 1, since you only need the plan that was created to run the existing function app.
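A hedged sketch of that last step with placeholder names; deleting the temporary function app leaves the newly created Consumption plan in place:

```azurecli
az functionapp delete --name <TEMP_APP_NAME> --resource-group <MY_RESOURCE_GROUP>
```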
azure-functions Ip Addresses https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/ip-addresses.md
For full control over the IP addresses, both inbound and outbound, we recommend
To find out if your function app runs in an App Service Environment:
-# [Azure Porta](#tab/portal)
+# [Azure portal](#tab/portal)
1. Sign in to the [Azure portal](https://portal.azure.com). 2. Navigate to the function app.
azure-functions Functions Cli Create App Service Plan https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-app-service-plan.md
Title: Create a Function App in an App Service plan - Azure CLI
description: Azure CLI Script Sample - Create a Function App in an App Service plan ms.assetid: 0e221db6-ee2d-4e16-9bf6-a456cd05b6e7 Previously updated : 07/03/2018 Last updated : 03/24/2022 # Create a Function App in an App Service plan
This Azure Functions sample script creates a function app, which is a container
[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This script creates an Azure Function app using a dedicated [App Service plan](../dedicated-plan.md).
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/create-function-app-app-service-plan/create-function-app-app-service-plan.sh "Create an Azure Function on an App Service plan")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
Each command in the table links to command specific documentation. This script uses the following commands:
azure-functions Functions Cli Create Function App Connect To Cosmos Db https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-function-app-connect-to-cosmos-db.md
Title: Create a function app with Azure Cosmos DB - Azure CLI description: Azure CLI Script Sample - Create an Azure Function that connects to an Azure Cosmos DB Previously updated : 07/03/2018 Last updated : 03/24/2022 # Create an Azure Function that connects to an Azure Cosmos DB
-This Azure Functions sample script creates a function app and connects the function to an Azure Cosmos DB database. The created app setting that contains the connection can be used with an [Azure Cosmos DB trigger or binding](../functions-bindings-cosmosdb.md).
+This Azure Functions sample script creates a function app and connects the function to an Azure Cosmos DB database. It makes the connection using an Azure Cosmos DB endpoint and access key that it adds to app settings. The created app setting that contains the connection can be used with an [Azure Cosmos DB trigger or binding](../functions-bindings-cosmosdb.md).
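A hedged sketch of how that connection information typically lands in app settings (placeholder resource and setting names; the sample script's actual names may differ):

```azurecli
# Look up the Azure Cosmos DB endpoint and primary key (placeholder names).
endpoint=$(az cosmosdb show --name <COSMOS_ACCOUNT> --resource-group <RESOURCE_GROUP> --query documentEndpoint --output tsv)
key=$(az cosmosdb keys list --name <COSMOS_ACCOUNT> --resource-group <RESOURCE_GROUP> --query primaryMasterKey --output tsv)

# Store them as app settings on the function app.
az functionapp config appsettings set --name <APP_NAME> --resource-group <RESOURCE_GROUP> \
    --settings CosmosDbEndpoint=$endpoint CosmosDbKey=$key
```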
[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This sample creates an Azure Function app and adds a Cosmos DB endpoint and access key to app settings.
+
+### Run the script
+
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/create-function-app-connect-to-cosmos-db/create-function-app-connect-to-cosmos-db.sh "Create an Azure Function that connects to an Azure Cosmos DB")]
+## Clean up resources
-## Script explanation
+```azurecli
+az group delete --name $resourceGroup
+```
-This script uses the following commands: Each command in the table links to command specific documentation.
+## Sample reference
| Command | Notes | |||
This script uses the following commands: Each command in the table links to comm
For more information on the Azure CLI, see [Azure CLI documentation](/cli/azure).
-Additional Azure Functions CLI script samples can be found in the [Azure Functions documentation](../functions-cli-samples.md).
+More Azure Functions CLI script samples can be found in the [Azure Functions documentation](../functions-cli-samples.md).
azure-functions Functions Cli Create Function App Connect To Storage Account https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-function-app-connect-to-storage-account.md
Title: Create a function app with connected storage - Azure CLI description: Azure CLI Script Sample - Create an Azure Function that connects to an Azure Storage Previously updated : 04/20/2017 Last updated : 03/24/2022
-# Create a function app with a named Storage account connection
+# Create a function app with a named Storage account connection
-This Azure Functions sample script creates a function app and connects the function to an Azure Storage account. The created app setting that contains the connection can be used with a [storage trigger or binding](../functions-bindings-storage-blob.md).
+This Azure Functions sample script creates a function app and connects the function to an Azure Storage account. The created app setting that contains the storage connection string can be used with a [storage trigger or binding](../functions-bindings-storage-blob.md).
[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This sample creates an Azure Function app and adds the storage connection string to an app setting.
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/create-function-app-connect-to-storage/create-function-app-connect-to-storage-account.sh "Integrate Function App into Azure Storage Account")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
This script uses the following commands. Each command in the table links to command specific documentation.
azure-functions Functions Cli Create Function App Github Continuous https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-function-app-github-continuous.md
Title: Create a function app with GitHub deployment - Azure CLI description: Create a function app and deploy function code from a GitHub repository using Azure Functions. Previously updated : 07/03/2018 Last updated : 03/24/2022 # Create a function app in Azure that is deployed from GitHub
-This Azure Functions sample script creates a function app using the [Consumption plan](../consumption-plan.md), along with its related resources. The script also configures your function code for continuous deployment from a GitHub repository.
-
-In this sample, you need:
-
-* A GitHub repository with functions code, that you have administrative permissions for.
-* A [Personal Access Token (PAT)](https://help.github.com/articles/creating-an-access-token-for-command-line-use) for your GitHub account.
+This Azure Functions sample script creates a function app using the [Consumption plan](../consumption-plan.md), along with its related resources. The script also configures your function code for continuous deployment from a public GitHub repository. There is also commented-out code for using a private GitHub repository.
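A hedged sketch of the kind of command the script uses to connect the function app to a public GitHub repository (placeholder names; `--manual-integration` is assumed here because no access token is needed for a public repository):

```azurecli
az functionapp deployment source config --name <APP_NAME> --resource-group <RESOURCE_GROUP> \
    --repo-url https://github.com/<OWNER>/<REPO> --branch main --manual-integration
```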
[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This sample creates an Azure Function app and deploys function code from GitHub.
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/deploy-function-app-with-function-github-continuous/deploy-function-app-with-function-github-continuous.sh?highlight=3-4 "Azure Service")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
Each command in the table links to command specific documentation. This script uses the following commands:
azure-functions Functions Cli Create Function App Vsts Continuous https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-function-app-vsts-continuous.md
- Title: Create a function app with DevOps deployment - Azure CLI
-description: Create a Function App and deploy function code from Azure DevOps
Previously updated : 07/03/2018---
-# Create a function in Azure that is deployed from Azure DevOps
-
-This topic shows you how to use Azure Functions to create a [serverless](https://azure.microsoft.com/solutions/serverless/) function app using the [Consumption plan](../consumption-plan.md). The function app, which is a container for your functions, is continuously deployed from an Azure DevOps repository.
-
-To complete this topic, you must have:
-
-* An Azure DevOps repository that contains your function app project and to which you have administrative permissions.
-* A [personal access token (PAT)](/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate) to access your Azure DevOps repository.
----
-## Sample script
-
-This sample creates an Azure Function app and deploys function code from Azure DevOps.
-
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/deploy-function-app-with-function-vsts/deploy-function-app-with-function-vsts.sh?highlight=3-4 "Azure Service")]
--
-## Script explanation
-
-This script uses the following commands to create a resource group, storage account, function app, and all related resources. Each command in the table links to command specific documentation.
-
-| Command | Notes |
-|||
-| [az group create](/cli/azure/group#az-group-create) | Creates a resource group in which all resources are stored. |
-| [az storage account create](/cli/azure/storage/account#az-storage-account-create) | Creates the storage account required by the function app. |
-| [az functionapp create](/cli/azure/functionapp#az-functionapp-create) | Creates a function app in the serverless [Consumption plan](../consumption-plan.md). |
-| [az functionapp deployment source config](/cli/azure/functionapp/deployment/source#az-functionapp-deployment-source-config) | Associates a function app with a Git or Mercurial repository. |
-
-## Next steps
-
-For more information on the Azure CLI, see [Azure CLI documentation](/cli/azure).
-
-Additional Azure Functions CLI script samples can be found in the [Azure Functions documentation](../functions-cli-samples.md).
azure-functions Functions Cli Create Premium Plan https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-premium-plan.md
Title: Create a function app in a Premium plan - Azure CLI
description: Create a function app in a scalable Premium plan in Azure using the Azure CLI Previously updated : 11/23/2019 Last updated : 03/24/2022
This Azure Functions sample script creates a function app, which is a container
[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This script creates a function app using a [Premium plan](../functions-premium-plan.md).
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/create-function-app-premium-plan/create-function-app-premium-plan.sh "Create an Azure Function on an App Service plan")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
Each command in the table links to command specific documentation. This script uses the following commands:
azure-functions Functions Cli Create Serverless Python https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-serverless-python.md
Title: Create a serverless Python function app - Azure CLI
description: Create a serverless Python function app using the Azure CLI ms.assetid: 0e221db6-ee2d-4e16-9bf6-a456cd05b6e7 Previously updated : 11/23/2019 Last updated : 03/24/2022 # Create a serverless Python function app using Azure CLI
-This Azure Functions sample script creates a function app, which is a container for your functions.
+This Azure Functions sample script creates a function app, which is a container for your functions, using the [Consumption plan](../consumption-plan.md).
>[!NOTE]
->The function app created runs on Python version 3.6. Python version 3.7 is also supported by Azure Functions.
+>The function app created runs on Python version 3.9. Python versions 3.7 and 3.8 are also supported by Azure Functions.
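As a hedged illustration (placeholder names; the sample script's actual parameters may differ), creating a Python 3.9 function app on the Consumption plan looks roughly like this:

```azurecli
az functionapp create --name <APP_NAME> --resource-group <RESOURCE_GROUP> \
    --storage-account <STORAGE_ACCOUNT> --consumption-plan-location westeurope \
    --os-type Linux --runtime python --runtime-version 3.9 --functions-version 4
```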
[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This script creates an Azure Function app using the [Consumption plan](../consumption-plan.md).
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/create-function-app-consumption-python/create-function-app-consumption-python.sh "Create an Azure Function on a Consumption plan")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
Each command in the table links to command specific documentation. This script uses the following commands:
azure-functions Functions Cli Create Serverless https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-create-serverless.md
Title: Create a serverless function app using the Azure CLI
description: Create a function app for serverless execution in Azure using the Azure CLI. ms.assetid: 0e221db6-ee2d-4e16-9bf6-a456cd05b6e7 Previously updated : 07/03/2018 Last updated : 03/24/2022
-# Create a function app for serverless code execution
+# Create a function app for serverless code execution
This Azure Functions sample script creates a function app, which is a container for your functions. The function app is created using the [Consumption plan](../consumption-plan.md), which is ideal for event-driven serverless workloads.
This Azure Functions sample script creates a function app, which is a container
[!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This script creates an Azure Function app using the [Consumption plan](../consumption-plan.md).
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/create-function-app-consumption/create-function-app-consumption.sh "Create an Azure Function on a Consumption plan")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
Each command in the table links to command specific documentation. This script uses the following commands:
azure-functions Functions Cli Mount Files Storage Linux https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-functions/scripts/functions-cli-mount-files-storage-linux.md
Title: Mount a file share to a Python function app - Azure CLI description: Create a serverless Python function app and mount an existing file share using the Azure CLI. Previously updated : 03/01/2020 Last updated : 03/24/2022 # Mount a file share to a Python function app using Azure CLI
-This Azure Functions sample script creates a function app and creates a share in Azure Files. It then mounts the share so that the data can be accessed by your functions.
+This Azure Functions sample script creates a function app using the [Consumption plan](../consumption-plan.md) and creates a share in Azure Files. It then mounts the share so that the data can be accessed by your functions.
>[!NOTE]
->The function app created runs on Python version 3.7. Azure Functions also [supports Python versions 3.6 and 3.8](../functions-reference-python.md#python-version).
+>The function app created runs on Python version 3.9. Azure Functions also [supports Python versions 3.7 and 3.8](../functions-reference-python.md#python-version).
[!INCLUDE [quickstarts-free-trial-note](../../../includes/quickstarts-free-trial-note.md)] [!INCLUDE [azure-cli-prepare-your-environment.md](../../../includes/azure-cli-prepare-your-environment.md)] - ## Sample script
-This script creates a function app in Azure Functions using the [Consumption plan](../consumption-plan.md).
+
+### Run the script
++
+## Clean up resources
-[!code-azurecli-interactive[main](../../../cli_scripts/azure-functions/functions-cli-mount-files-storage-linux/functions-cli-mount-files-storage-linux.sh "Create a function app on a Consumption plan")]
+```azurecli
+az group delete --name $resourceGroup
+```
-## Script explanation
+## Sample reference
Each command in the table links to command specific documentation. This script uses the following commands:
azure-monitor Java In Process Agent https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-monitor/app/java-in-process-agent.md
This section shows you how to download the auto-instrumentation jar file.
#### Download the jar file
-Download the [applicationinsights-agent-3.2.8.jar](https://github.com/microsoft/ApplicationInsights-Java/releases/download/3.2.8/applicationinsights-agent-3.2.8.jar) file.
+Download the [applicationinsights-agent-3.2.9.jar](https://github.com/microsoft/ApplicationInsights-Java/releases/download/3.2.9/applicationinsights-agent-3.2.9.jar) file.
> [!WARNING] >
Download the [applicationinsights-agent-3.2.8.jar](https://github.com/microsoft/
#### Point the JVM to the jar file
-Add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to your application's JVM args.
+Add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to your application's JVM args.
> [!TIP] > For help with configuring your application's JVM args, see [Tips for updating your JVM args](./java-standalone-arguments.md).
Add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to your application
APPLICATIONINSIGHTS_CONNECTION_STRING=InstrumentationKey=... ```
- - Or you can create a configuration file named `applicationinsights.json`. Place it in the same directory as `applicationinsights-agent-3.2.8.jar` with the following content:
+ - Or you can create a configuration file named `applicationinsights.json`. Place it in the same directory as `applicationinsights-agent-3.2.9.jar` with the following content:
```json {
azure-monitor Java Standalone Arguments https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-monitor/app/java-standalone-arguments.md
Configure [App Services](../../app-service/configure-language-java.md#set-java-r
## Spring Boot
-Add the JVM arg `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` somewhere before `-jar`, for example:
+Add the JVM arg `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` somewhere before `-jar`, for example:
```
-java -javaagent:path/to/applicationinsights-agent-3.2.8.jar -jar <myapp.jar>
+java -javaagent:path/to/applicationinsights-agent-3.2.9.jar -jar <myapp.jar>
``` ## Spring Boot via Docker entry point
-If you're using the *exec* form, add the parameter `"-javaagent:path/to/applicationinsights-agent-3.2.8.jar"` to the parameter list somewhere before the `"-jar"` parameter, for example:
+If you're using the *exec* form, add the parameter `"-javaagent:path/to/applicationinsights-agent-3.2.9.jar"` to the parameter list somewhere before the `"-jar"` parameter, for example:
```
-ENTRYPOINT ["java", "-javaagent:path/to/applicationinsights-agent-3.2.8.jar", "-jar", "<myapp.jar>"]
+ENTRYPOINT ["java", "-javaagent:path/to/applicationinsights-agent-3.2.9.jar", "-jar", "<myapp.jar>"]
```
-If you're using the *shell* form, add the JVM arg `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` somewhere before `-jar`, for example:
+If you're using the *shell* form, add the JVM arg `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` somewhere before `-jar`, for example:
```
-ENTRYPOINT java -javaagent:path/to/applicationinsights-agent-3.2.8.jar -jar <myapp.jar>
+ENTRYPOINT java -javaagent:path/to/applicationinsights-agent-3.2.9.jar -jar <myapp.jar>
``` ## Tomcat 8 (Linux)
ENTRYPOINT java -javaagent:path/to/applicationinsights-agent-3.2.8.jar -jar <mya
If you installed Tomcat via `apt-get` or `yum`, then you should have a file `/etc/tomcat8/tomcat8.conf`. Add this line to the end of that file: ```
-JAVA_OPTS="$JAVA_OPTS -javaagent:path/to/applicationinsights-agent-3.2.8.jar"
+JAVA_OPTS="$JAVA_OPTS -javaagent:path/to/applicationinsights-agent-3.2.9.jar"
``` ### Tomcat installed via download and unzip
JAVA_OPTS="$JAVA_OPTS -javaagent:path/to/applicationinsights-agent-3.2.8.jar"
If you installed Tomcat via download and unzip from [https://tomcat.apache.org](https://tomcat.apache.org), then you should have a file `<tomcat>/bin/catalina.sh`. Create a new file in the same directory named `<tomcat>/bin/setenv.sh` with the following content: ```
-CATALINA_OPTS="$CATALINA_OPTS -javaagent:path/to/applicationinsights-agent-3.2.8.jar"
+CATALINA_OPTS="$CATALINA_OPTS -javaagent:path/to/applicationinsights-agent-3.2.9.jar"
```
-If the file `<tomcat>/bin/setenv.sh` already exists, then modify that file and add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to `CATALINA_OPTS`.
+If the file `<tomcat>/bin/setenv.sh` already exists, then modify that file and add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to `CATALINA_OPTS`.
## Tomcat 8 (Windows)
If the file `<tomcat>/bin/setenv.sh` already exists, then modify that file and a
Locate the file `<tomcat>/bin/catalina.bat`. Create a new file in the same directory named `<tomcat>/bin/setenv.bat` with the following content: ```
-set CATALINA_OPTS=%CATALINA_OPTS% -javaagent:path/to/applicationinsights-agent-3.2.8.jar
+set CATALINA_OPTS=%CATALINA_OPTS% -javaagent:path/to/applicationinsights-agent-3.2.9.jar
``` Quotes aren't necessary, but if you want to include them, the proper placement is: ```
-set "CATALINA_OPTS=%CATALINA_OPTS% -javaagent:path/to/applicationinsights-agent-3.2.8.jar"
+set "CATALINA_OPTS=%CATALINA_OPTS% -javaagent:path/to/applicationinsights-agent-3.2.9.jar"
```
-If the file `<tomcat>/bin/setenv.bat` already exists, just modify that file and add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to `CATALINA_OPTS`.
+If the file `<tomcat>/bin/setenv.bat` already exists, just modify that file and add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to `CATALINA_OPTS`.
### Running Tomcat as a Windows service
-Locate the file `<tomcat>/bin/tomcat8w.exe`. Run that executable and add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to the `Java Options` under the `Java` tab.
+Locate the file `<tomcat>/bin/tomcat8w.exe`. Run that executable and add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to the `Java Options` under the `Java` tab.
## JBoss EAP 7 ### Standalone server
-Add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to the existing `JAVA_OPTS` environment variable in the file `JBOSS_HOME/bin/standalone.conf` (Linux) or `JBOSS_HOME/bin/standalone.conf.bat` (Windows):
+Add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to the existing `JAVA_OPTS` environment variable in the file `JBOSS_HOME/bin/standalone.conf` (Linux) or `JBOSS_HOME/bin/standalone.conf.bat` (Windows):
```java ...
- JAVA_OPTS="-javaagent:path/to/applicationinsights-agent-3.2.8.jar -Xms1303m -Xmx1303m ..."
+ JAVA_OPTS="-javaagent:path/to/applicationinsights-agent-3.2.9.jar -Xms1303m -Xmx1303m ..."
... ``` ### Domain server
-Add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to the existing `jvm-options` in `JBOSS_HOME/domain/configuration/host.xml`:
+Add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to the existing `jvm-options` in `JBOSS_HOME/domain/configuration/host.xml`:
```xml ...
Add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to the existing `jv
<jvm-options> <option value="-server"/> <!--Add Java agent jar file here-->
- <option value="-javaagent:path/to/applicationinsights-agent-3.2.8.jar"/>
+ <option value="-javaagent:path/to/applicationinsights-agent-3.2.9.jar"/>
<option value="-XX:MetaspaceSize=96m"/> <option value="-XX:MaxMetaspaceSize=256m"/> </jvm-options>
Add these lines to `start.ini`
``` --exec--javaagent:path/to/applicationinsights-agent-3.2.8.jar
+-javaagent:path/to/applicationinsights-agent-3.2.9.jar
``` ## Payara 5
-Add `-javaagent:path/to/applicationinsights-agent-3.2.8.jar` to the existing `jvm-options` in `glassfish/domains/domain1/config/domain.xml`:
+Add `-javaagent:path/to/applicationinsights-agent-3.2.9.jar` to the existing `jvm-options` in `glassfish/domains/domain1/config/domain.xml`:
```xml ... <java-config ...> <!--Edit the JVM options here--> <jvm-options>
- -javaagent:path/to/applicationinsights-agent-3.2.8.jar>
+ -javaagent:path/to/applicationinsights-agent-3.2.9.jar
</jvm-options> ... </java-config>
Java and Process Management > Process definition > Java Virtual Machine
``` In "Generic JVM arguments" add the following: ```--javaagent:path/to/applicationinsights-agent-3.2.8.jar
+-javaagent:path/to/applicationinsights-agent-3.2.9.jar
``` After that, save and restart the application server.
After that, save and restart the application server.
Create a new file `jvm.options` in the server directory (for example `<openliberty>/usr/servers/defaultServer`), and add this line: ```--javaagent:path/to/applicationinsights-agent-3.2.8.jar
+-javaagent:path/to/applicationinsights-agent-3.2.9.jar
``` ## Others
azure-monitor Java Standalone Config https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-monitor/app/java-standalone-config.md
You will find more details and additional configuration options below.
## Configuration file path
-By default, Application Insights Java 3.x expects the configuration file to be named `applicationinsights.json`, and to be located in the same directory as `applicationinsights-agent-3.2.8.jar`.
+By default, Application Insights Java 3.x expects the configuration file to be named `applicationinsights.json`, and to be located in the same directory as `applicationinsights-agent-3.2.9.jar`.
You can specify your own configuration file path using either * `APPLICATIONINSIGHTS_CONFIGURATION_FILE` environment variable, or * `applicationinsights.configuration.file` Java system property
-If you specify a relative path, it will be resolved relative to the directory where `applicationinsights-agent-3.2.8.jar` is located.
+If you specify a relative path, it will be resolved relative to the directory where `applicationinsights-agent-3.2.9.jar` is located.
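For example (a sketch; the path shown is a placeholder), either of the following points the agent at a custom configuration file:

```
# Environment variable
export APPLICATIONINSIGHTS_CONFIGURATION_FILE=/path/to/applicationinsights.json

# Or a Java system property
java -Dapplicationinsights.configuration.file=/path/to/applicationinsights.json -javaagent:path/to/applicationinsights-agent-3.2.9.jar -jar <myapp.jar>
```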
Alternatively, instead of using a configuration file, you can specify the entire _content_ of the json configuration via the environment variable `APPLICATIONINSIGHTS_CONFIGURATION_CONTENT`.
You can also set the connection string using the environment variable `APPLICATI
You can also set the connection string by specifying a file to load the connection string from.
-If you specify a relative path, it will be resolved relative to the directory where `applicationinsights-agent-3.2.8.jar` is located.
+If you specify a relative path, it will be resolved relative to the directory where `applicationinsights-agent-3.2.9.jar` is located.
```json {
To disable auto-collection of Micrometer metrics (including Spring Boot Actuator
## HTTP headers
-Starting from 3.2.8, you can capture request and response headers on your server (request) telemetry:
+Starting from 3.2.9, you can capture request and response headers on your server (request) telemetry:
```json {
Again, the header names are case-insensitive, and the examples above will be cap
By default, http server requests that result in 4xx response codes are captured as errors.
-Starting from version 3.2.8, you can change this behavior to capture them as success if you prefer:
+Starting from version 3.2.9, you can change this behavior to capture them as success if you prefer:
```json {
Starting from version 3.2.0, the following preview instrumentations can be enabl
``` > [!NOTE] > Akka instrumentation is available starting from version 3.2.2
-> Vertx HTTP Library instrumentation is available starting from version 3.2.8
+> Vertx HTTP Library instrumentation is available starting from version 3.2.9
## Metric interval
and the console, corresponding to this configuration:
`level` can be one of `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, or `TRACE`. `path` can be an absolute or relative path. Relative paths are resolved against the directory where
-`applicationinsights-agent-3.2.8.jar` is located.
+`applicationinsights-agent-3.2.9.jar` is located.
`maxSizeMb` is the max size of the log file before it rolls over.
azure-monitor Java Standalone Troubleshoot https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-monitor/app/java-standalone-troubleshoot.md
In this article, we cover some of the common issues that you might face while in
## Check the self-diagnostic log file By default, Application Insights Java 3.x produces a log file named `applicationinsights.log` in the same directory
-that holds the `applicationinsights-agent-3.2.8.jar` file.
+that holds the `applicationinsights-agent-3.2.9.jar` file.
This log file is the first place to check for hints to any issues you might be experiencing. If no log file is generated, check that your Java application has write permission to the directory that holds the
-`applicationinsights-agent-3.2.8.jar` file.
+`applicationinsights-agent-3.2.9.jar` file.
If still no log file is generated, check the stdout log from your Java application. Application Insights Java 3.x should log any errors to stdout that would prevent it from logging to its normal location.
azure-monitor Standard Metrics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-monitor/app/standard-metrics.md
The metric shows how much of the total processor capacity is consumed by the pro
|||| |Percentage|Average, Max, Min| `Cloud role instance` |
+> [!NOTE]
+> The range of the metric is between 0 and 100 * n, where n is the number of available CPU cores. For example, a metric value of 200% could represent full utilization of two CPU cores or half utilization of four CPU cores, and so on. *Process CPU Normalized* is an alternative metric collected by many SDKs; it represents the same value divided by the number of available CPU cores, so its range is 0 through 100.
### Process IO rate (performanceCounters/processIOBytesPerSecond)
azure-monitor App Insights Metrics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-monitor/essentials/app-insights-metrics.md
performanceCounters
| render timechart ```
+> [!NOTE]
+> The range of the metric is between 0 and 100 * n, where n is the number of available CPU cores. For example, a metric value of 200% could represent full utilization of two CPU cores or half utilization of four CPU cores, and so on. *Process CPU Normalized* is an alternative metric collected by many SDKs; it represents the same value divided by the number of available CPU cores, so its range is 0 through 100.
+ ### Process IO rate (performanceCounters/processIOBytesPerSecond) |Unit of measure|Supported aggregations|Supported dimensions|
union traces, requests, pageViews, dependencies, customEvents, availabilityResul
| where notempty(user_AuthenticatedId) | summarize dcount(user_AuthenticatedId) by bin(timestamp, 1h) | render barchart
-```
+```
azure-netapp-files Modify Active Directory Connections https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-netapp-files/modify-active-directory-connections.md
Once you have [created an Active Directory connection](create-active-directory-c
| User DN | User domain name, which overrides the base DN for user lookups Nested userDN can be specified in `OU=subdirectory, OU=directory, DC=domain, DC=com` format.​ | Yes | None* | User search scope gets limited to User DN instead of base DN. | | Group DN | Group domain name. groupDN overrides the base DN for group lookups. Nested groupDN can be specified in `OU=subdirectory, OU=directory, DC=domain, DC=com` format.​ | Yes | None* | Group search scope gets limited to Group DN instead of base DN. | | Group Membership Filter | The custom LDAP search filter to be used when looking up group membership from LDAP server.​ `groupMembershipFilter` can be specified with the `(gidNumber=*)` format. | Yes | None* | Group membership filter will be used while querying group membership of a user from LDAP server. |
-| Security Privilege Users | You can grant security privilege (`SeSecurityPrivilege`) to users that require elevated privilege to access the Azure NetApp Files volumes. The specified user accounts will be allowed to perform certain actions on Azure NetApp Files SMB shares that require security privilege not assigned by default to domain users. See [Create and manage Active Directory connections](create-active-directory-connections.md#create-an-active-directory-connection) for more information. | Yes | Using this feature is optional and supported only for SQL Server. The domain account used for installing SQL Server must already exist before you add it to the Security privilege users field. When you add the SQL Server installer's account to Security privilege users, the Azure NetApp Files service might validate the account by contacting the domain controller. The command might fail if it cannot contact the domain controller. For more information about `SeSecurityPrivilege` and SQL Server, see [SQL Server installation fails if the Setup account doesn't have certain user rights](/troubleshoot/sql/install/installation-fails-if-remove-user-right.md).* | Allows non-administrator accounts to use SQL severs on top of ANF volumes. |
+| Security Privilege Users | You can grant security privilege (`SeSecurityPrivilege`) to users that require elevated privilege to access the Azure NetApp Files volumes. The specified user accounts will be allowed to perform certain actions on Azure NetApp Files SMB shares that require security privilege not assigned by default to domain users. See [Create and manage Active Directory connections](create-active-directory-connections.md#create-an-active-directory-connection) for more information. | Yes | Using this feature is optional and supported only for SQL Server. The domain account used for installing SQL Server must already exist before you add it to the Security privilege users field. When you add the SQL Server installer's account to Security privilege users, the Azure NetApp Files service might validate the account by contacting the domain controller. The command might fail if it cannot contact the domain controller. For more information about `SeSecurityPrivilege` and SQL Server, see [SQL Server installation fails if the Setup account doesn't have certain user rights](/troubleshoot/sql/install/installation-fails-if-remove-user-right).* | Allows non-administrator accounts to use SQL servers on top of ANF volumes. |
**\*There is no impact on a modified entry only if the modifications are entered correctly. If you enter data incorrectly, users and applications will lose access.**
Once you have [created an Active Directory connection](create-active-directory-c
* [Configure ADDS LDAP with extended groups for NFS](configure-ldap-extended-groups.md) * [Configure ADDS LDAP over TLS](configure-ldap-over-tls.md)
-* [Create and manage Active Directory connections](create-active-directory-connections.md)
+* [Create and manage Active Directory connections](create-active-directory-connections.md)
azure-percept Azure Percept Devkit Software Release Notes https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-percept/azure-percept-devkit-software-release-notes.md
This page provides information of changes and fixes for each Azure Percept DK OS
To download the update images, refer to [Azure Percept DK software releases for USB cable update](./software-releases-usb-cable-updates.md) or [Azure Percept DK software releases for OTA update](./software-releases-over-the-air-updates.md).
+## March (2203) Release
+
+- Operating System
+ - Latest security fixes.
+
## February (2202) Release - Operating System
azure-percept Software Releases Over The Air Updates https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-percept/software-releases-over-the-air-updates.md
Microsoft would service each dev kit release with OTA packages. However, as ther
## Identify the OTA package(s) to be deployed >[!IMPORTANT]
->If the current version of you dev kit isn't included in any of the releases below, it's NOT supported for OTA update. Please do a USB cable update to get to the latest version.
+>If the current version of your dev kit isn't included in any of the releases below, it's NOT supported for OTA update. Please do a USB cable update to get to the latest version.
>[!CAUTION] >Make sure you are using the **old version** of the Device Update for IoT Hub. To do that, navigate to **Device management > Updates** in your IoT Hub, select the **switch to the older version** link in the banner. For more information, please refer to [Update Azure Percept DK over-the-air](./how-to-update-over-the-air.md).
Microsoft would service each dev kit release with OTA packages. However, as ther
|Release|Applicable Version(s)|Download Links|Note| |||||
-|February Service Release (2202)|2021.106.111.115,<br>2021.107.129.116,<br>2021.109.129.108, <br>2021.111.124.109, <br>2022.101.112.106|[2022.102.109.102 OTA update package](<https://download.microsoft.com/download/f/f/3/ff37dfee-ee0e-4b2d-82ef-5926062fcdbd/2022.102.109.102 OTA update package.zip>)|Make sure you are using the **old version** of the Device Update for IoT Hub. To do that, navigate to **Device management > Updates** in your IoT Hub, select the **switch to the older version** link in the banner. For more information, please refer to [Update Azure Percept DK over-the-air](./how-to-update-over-the-air.md).|
+|March Service Release (2203)|2021.106.111.115,<br>2021.107.129.116,<br>2021.109.129.108, <br>2021.111.124.109, <br>2022.101.112.106, <br>2022.102.109.102|[2022.103.110.103 OTA update package](<https://download.microsoft.com/download/2/3/4/234bdbf8-8f08-4d7a-8b33-7d5afc921bf1/2022.103.110.103 OTA update package.zip>)|Make sure you are using the **old version** of the Device Update for IoT Hub. To do that, navigate to **Device management > Updates** in your IoT Hub, select the **switch to the older version** link in the banner. For more information, please refer to [Update Azure Percept DK over-the-air](./how-to-update-over-the-air.md).|
**Hard-stop releases:**
azure-percept Software Releases Usb Cable Updates https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-percept/software-releases-usb-cable-updates.md
This page provides information and download links for all the dev kit OS/firmwar
## Latest releases - **Latest service release**
-February Service Release (2202): [Azure-Percept-DK-1.0.20220209.1156-public_preview_1.0.zip](<https://download.microsoft.com/download/f/8/6/f86ce7b3-8d76-494e-82d9-dcfb71fc2580/Azure-Percept-DK-1.0.20220209.1156-public_preview_1.0.zip>)
-
+March Service Release (2203): [Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip](<https://download.microsoft.com/download/c/6/f/c6f6b152-699e-4f60-85b7-17b3ea57c189/Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip>)
- **Latest major update or known stable version** Feature Update (2104): [Azure-Percept-DK-1.0.20210409.2055.zip](https://download.microsoft.com/download/6/4/d/64d53e60-f702-432d-a446-007920a4612c/Azure-Percept-DK-1.0.20210409.2055.zip)
Feature Update (2104): [Azure-Percept-DK-1.0.20210409.2055.zip](https://download
|Release|Download Links|Note| |||::|
+|March Service Release (2203)|[Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip](<https://download.microsoft.com/download/c/6/f/c6f6b152-699e-4f60-85b7-17b3ea57c189/Azure-Percept-DK-1.0.20220310.1223-public_preview_1.0.zip>)||
|February Service Release (2202)|[Azure-Percept-DK-1.0.20220209.1156-public_preview_1.0.zip](<https://download.microsoft.com/download/f/8/6/f86ce7b3-8d76-494e-82d9-dcfb71fc2580/Azure-Percept-DK-1.0.20220209.1156-public_preview_1.0.zip>)|| |January Service Release (2201)|[Azure-Percept-DK-1.0.20220112.1519-public_preview_1.0.zip](<https://download.microsoft.com/download/1/6/4/164cfcf2-ce52-4e75-9dee-63bb4a128e71/Azure-Percept-DK-1.0.20220112.1519-public_preview_1.0.zip>)|| |November Service Release (2111)|[Azure-Percept-DK-1.0.20211124.1851-public_preview_1.0.zip](<https://download.microsoft.com/download/9/5/4/95464a73-109e-46c7-8624-251ceed0c5ea/Azure-Percept-DK-1.0.20211124.1851-public_preview_1.0.zip>)||
azure-sql Azure Sql Iaas Vs Paas What Is Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/azure-sql-iaas-vs-paas-what-is-overview.md
keywords: SQL Server cloud, SQL Server in the cloud, PaaS database, cloud SQL Se
Previously updated : 07/27/2020 Last updated : 03/18/2022 # What is Azure SQL? [!INCLUDE[appliesto-asf](includes/appliesto-asf.md)]
Azure constantly monitors your data for threats. With Azure SQL, you can:
There are several factors that can influence your decision to choose between the different data offerings: -- [Cost](#cost): Both PaaS and IaaS option include base price that covers underlying infrastructure and licensing. However, with IaaS option you need to invest additional time and resources to manage your database, while in PaaS you get these administration features included in the price. IaaS enables you to shut down resources while you are not using them to decrease the cost, while PaaS is always running unless you drop and re-create your resources when they are needed.
+- [Cost](#cost): Both platform as a service (PaaS) and infrastructure as a service (IaaS) options include a base price that covers the underlying infrastructure and licensing. However, with the IaaS option you need to invest additional time and resources to manage your database, while in PaaS you get these administration features included in the price. IaaS enables you to shut down resources while you are not using them to decrease the cost, while PaaS is always running unless you drop and re-create your resources when they are needed.
- [Administration](#administration): PaaS options reduce the amount of time that you need to invest to administer the database. However, it also limits the range of custom administration tasks and scripts that you can perform or run. For example, the CLR is not supported with SQL Database, but is supported for an instance of SQL Managed Instance. Also, no deployment options in PaaS support the use of trace flags. - [Service-level agreement](#service-level-agreement-sla): Both IaaS and PaaS provide high, industry standard SLA. PaaS option guarantees 99.99% SLA, while IaaS guarantees 99.95% SLA for infrastructure, meaning that you need to implement additional mechanisms to ensure availability of your databases. You can attain 99.99% SLA by creating an additional SQL virtual machine, and implementing the SQL Server Always On availability group high availability solution. - [Time to move to Azure](#market): SQL Server on Azure VM is the exact match of your environment, so migration from on-premises to the Azure VM is no different than moving the databases from one on-premises server to another. SQL Managed Instance also enables easy migration; however, there might be some changes that you need to apply before your migration.
With SQL Server, you can use built-in features and functionality that requires e
Azure SQL Database offers the following deployment options: - As a [*single database*](database/single-database-overview.md) with its own set of resources managed via a [logical SQL server](database/logical-servers.md). A single database is similar to a [contained database](/sql/relational-databases/databases/contained-databases) in SQL Server. This option is optimized for modern application development of new cloud-born applications. [Hyperscale](database/service-tier-hyperscale.md) and [serverless](database/serverless-tier-overview.md) options are available.
- - An [*elastic pool*](database/elastic-pool-overview.md), which is a collection of databases with a shared set of resources managed via a [logical SQL server](database/logical-servers.md). Single databases can be moved into and out of an elastic pool. This option is optimized for modern application development of new cloud-born applications using the multi-tenant SaaS application pattern. Elastic pools provide a cost-effective solution for managing the performance of multiple databases that have variable usage patterns.
+ - An [*elastic pool*](database/elastic-pool-overview.md), which is a collection of databases with a shared set of resources managed via a [logical server](database/logical-servers.md). Single databases can be moved into and out of an elastic pool. This option is optimized for modern application development of new cloud-born applications using the multi-tenant SaaS application pattern. Elastic pools provide a cost-effective solution for managing the performance of multiple databases that have variable usage patterns.
### Azure SQL Managed Instance
In general, SQL Database and SQL Managed Instance can dramatically increase the
| Azure SQL Database | Azure SQL Managed Instance | SQL Server on Azure VM | | : | : | : | |Supports most on-premises database-level capabilities. The most commonly used SQL Server features are available.<br/>99.995% availability guaranteed.<br/>Built-in backups, patching, recovery.<br/>Latest stable Database Engine version.<br/>Ability to assign necessary resources (CPU/storage) to individual databases.<br/>Built-in advanced intelligence and security.<br/>Online change of resources (CPU/storage).| Supports almost all on-premises instance-level and database-level capabilities. High compatibility with SQL Server.<br/>99.99% availability guaranteed.<br/>Built-in backups, patching, recovery.<br/>Latest stable Database Engine version.<br/>Easy migration from SQL Server.<br/>Private IP address within Azure Virtual Network.<br/>Built-in advanced intelligence and security.<br/>Online change of resources (CPU/storage).| You have full control over the SQL Server engine. Supports all on-premises capabilities.<br/>Up to 99.99% availability.<br/>Full parity with the matching version of on-premises SQL Server.<br/>Fixed, well-known Database Engine version.<br/>Easy migration from SQL Server.<br/>Private IP address within Azure Virtual Network.<br/>You have the ability to deploy application or services on the host where SQL Server is placed.|
-|Migration from SQL Server might be challenging.<br/>Some SQL Server features are not available.<br/>No guaranteed exact maintenance time (but nearly transparent).<br/>Compatibility with the SQL Server version can be achieved only using database compatibility levels.<br/>Private IP address support with [Azure Private Link](database/private-endpoint-overview.md).|There is still some minimal number of SQL Server features that are not available.<br/>No guaranteed exact maintenance time (but nearly transparent).<br/>Compatibility with the SQL Server version can be achieved only using database compatibility levels.|You need to manage your backups and patches.<br>You need to implement your own High-Availability solution.<br/>There is a downtime while changing the resources(CPU/storage)|
+|Migration from SQL Server might be challenging.<br/>Some SQL Server features are not available.<br/>Configurable [maintenance windows](database/maintenance-window.md).<br/>Compatibility with the SQL Server version can be achieved only using database compatibility levels.<br/>Private IP address support with [Azure Private Link](database/private-endpoint-overview.md).|There is still a minimal number of SQL Server features that are not available.<br/>Configurable [maintenance windows](database/maintenance-window.md).<br/>Compatibility with the SQL Server version can be achieved only using database compatibility levels.|You may use [manual or automated backups](virtual-machines/windows/backup-restore.md).<br>You need to implement your own high-availability solution.<br/>There is downtime while changing resources (CPU/storage).|
| Databases of up to 100 TB. | Up to 16 TB. | SQL Server instances with up to 256 TB of storage. The instance can support as many databases as needed. | | On-premises application can access data in Azure SQL Database. | [Native virtual network implementation](managed-instance/vnet-existing-add-subnet.md) and connectivity to your on-premises environment using Azure Express Route or VPN Gateway. | With SQL virtual machines, you can have applications that run partly in the cloud and partly on-premises. For example, you can extend your on-premises network and Active Directory Domain to the cloud via [Azure Virtual Network](../virtual-network/virtual-networks-overview.md). For more information on hybrid cloud solutions, see [Extending on-premises data solutions to the cloud](/azure/architecture/data-guide/scenarios/hybrid-on-premises-and-cloud). |
azure-sql Connect Query Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/database/connect-query-go.md
Last updated 04/14/2021
# Quickstart: Use Golang to query a database in Azure SQL Database or Azure SQL Managed Instance [!INCLUDE[appliesto-sqldb-sqlmi](../includes/appliesto-sqldb-sqlmi.md)]
-In this quickstart, you'll use the [Golang](https://godoc.org/github.com/denisenkom/go-mssqldb) programming language to connect to a database in Azure SQL Database or Azure SQL Managed Instance. You'll then run Transact-SQL statements to query and modify data. [Golang](https://golang.org/) is an open-source programming language that makes it easy to build simple, reliable, and efficient software.
+In this quickstart, you'll use the [Golang](https://godoc.org/github.com/denisenkom/go-mssqldb) programming language to connect to a database in Azure SQL Database or Azure SQL Managed Instance. You'll then run Transact-SQL statements to query and modify data. [Golang](https://go.dev/) is an open-source programming language that makes it easy to build simple, reliable, and efficient software.
## Prerequisites
Get the connection information you need to connect to the database. You'll need
1. Create a file named **sample.go** in the **SqlServerSample** folder.
-2. In the file, paste this code. Add the values for your server, database, username, and password. This example uses the Golang [context methods](https://golang.org/pkg/context/) to make sure there's an active connection.
+2. In the file, paste this code. Add the values for your server, database, username, and password. This example uses the Golang [context methods](https://go.dev/pkg/context/) to make sure there's an active connection.
```go
package main
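
// A minimal sketch of the pattern the step above describes: open a connection with the
// denisenkom/go-mssqldb driver referenced earlier and use a context to confirm there is
// an active connection. This is not the complete quickstart sample; the server, database,
// username, and password values are placeholders.

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/denisenkom/go-mssqldb"
)

func main() {
	// Replace the placeholders with your server, database, username, and password.
	connString := "server=<your-server>.database.windows.net;user id=<your-username>;password=<your-password>;port=1433;database=<your-database>;"

	db, err := sql.Open("sqlserver", connString)
	if err != nil {
		log.Fatal("Error creating connection pool: ", err)
	}
	defer db.Close()

	// PingContext verifies that an active connection exists before running T-SQL statements.
	if err := db.PingContext(context.Background()); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Connected!")
}
```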
azure-sql Connectivity Architecture https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/database/connectivity-architecture.md
Title: Azure SQL Database Connectivity Architecture
-description: This document explains the Azure SQL Database connectivity architecture for database connections from within Azure or from outside of Azure.
+ Title: Azure SQL Database connectivity architecture
+description: This article explains the Azure SQL Database connectivity architecture for database connections from within Azure or from outside of Azure.
Previously updated : 01/25/2021 Last updated : 03/18/2022 # Azure SQL Database and Azure Synapse Analytics connectivity architecture [!INCLUDE[appliesto-sqldb-asa](../includes/appliesto-sqldb-asa.md)]
The following steps describe how a connection is established to Azure SQL Databa
- Clients connect to the gateway, which has a public IP address and listens on port 1433. - The gateway, depending on the effective connection policy, redirects or proxies the traffic to the right database cluster.-- Inside the database cluster traffic is forwarded to the appropriate database.
+- Inside the database cluster, traffic is forwarded to the appropriate database.
## Connection policy
Servers in SQL Database and Azure Synapse support the following three options fo
- **Default:** This is the connection policy in effect on all servers after creation unless you explicitly alter the connection policy to either `Proxy` or `Redirect`. The default policy is `Redirect` for all client connections originating inside of Azure (for example, from an Azure Virtual Machine) and `Proxy` for all client connections originating outside (for example, connections from your local workstation).
-We highly recommend the `Redirect` connection policy over the `Proxy` connection policy for the lowest latency and highest throughput. However, you will need to meet the additional requirements for allowing network traffic as outlined above. If the client is an Azure Virtual Machine you can accomplish this using Network Security Groups (NSG) with [service tags](../../virtual-network/network-security-groups-overview.md#service-tags). If the client is connecting from a workstation on-premises then you may need to work with your network admin to allow network traffic through your corporate firewall.
+We highly recommend the `Redirect` connection policy over the `Proxy` connection policy for the lowest latency and highest throughput. However, you will need to meet the additional requirements for allowing network traffic as outlined above. If the client is an Azure Virtual Machine, you can accomplish this using Network Security Groups (NSG) with [service tags](../../virtual-network/network-security-groups-overview.md#service-tags). If the client is connecting from a workstation on-premises, then you may need to work with your network admin to allow network traffic through your corporate firewall.
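If you need to confirm that your firewall allows the gateway traffic described above, a quick TCP reachability check can help. The following is only a minimal sketch in Go (matching the Go quickstart elsewhere in this collection); the server name is a placeholder, and it only tests port 1433, the gateway port mentioned earlier.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Placeholder server name; replace with your own logical server.
	gateway := "<your-server>.database.windows.net:1433"

	conn, err := net.DialTimeout("tcp", gateway, 5*time.Second)
	if err != nil {
		fmt.Println("Gateway not reachable:", err)
		return
	}
	defer conn.Close()
	fmt.Println("TCP connection to the gateway on port 1433 succeeded.")
}
```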
## Connectivity from within Azure
azure-sql Sql Database Paas Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/database/sql-database-paas-overview.md
Previously updated : 09/21/2020 Last updated : 03/18/2022 # What is Azure SQL Database? [!INCLUDE[appliesto-sqldb](../includes/appliesto-sqldb.md)]
-Azure SQL Database is a fully managed platform as a service (PaaS) database engine that handles most of the database management functions such as upgrading, patching, backups, and monitoring without user involvement. Azure SQL Database is always running on the latest stable version of the SQL Server database engine and patched OS with 99.99% availability. PaaS capabilities that are built into Azure SQL Database enable you to focus on the domain-specific database administration and optimization activities that are critical for your business.
+Azure SQL Database is a fully managed platform as a service (PaaS) database engine that handles most of the database management functions such as upgrading, patching, backups, and monitoring without user involvement. Azure SQL Database is always running on the latest stable version of the SQL Server database engine and patched OS with [99.99% availability](https://azure.microsoft.com/support/legal/sla/azure-sql-database). PaaS capabilities built into Azure SQL Database enable you to focus on the domain-specific database administration and optimization activities that are critical for your business.
With Azure SQL Database, you can create a highly available and high-performance data storage layer for the applications and solutions in Azure. SQL Database can be the right choice for a variety of modern cloud applications because it enables you to process both relational data and [non-relational structures](../multi-model-features.md), such as graphs, JSON, spatial, and XML.
Azure SQL Database provides the following deployment options for a database:
- [Elastic pool](elastic-pool-overview.md) is a collection of [single databases](single-database-overview.md) with a shared set of resources, such as CPU or memory. Single databases can be moved into and out of an elastic pool. > [!IMPORTANT]
-> To understand the feature differences between SQL Database and SQL Server, as well as the differences among different Azure SQL Database options, see [SQL Database features](features-comparison.md).
+> To understand the feature differences between SQL Database, SQL Server, and Azure SQL Managed Instance, as well as the differences among different Azure SQL Database options, see [SQL Database features](features-comparison.md).
SQL Database delivers predictable performance with multiple resource types, service tiers, and compute sizes. It provides dynamic scalability with no downtime, built-in intelligent optimization, global scalability and availability, and advanced security options. These capabilities allow you to focus on rapid app development and accelerating your time-to-market, rather than on managing virtual machines and infrastructure. SQL Database is currently in 38 datacenters around the world, so you can run your database in a datacenter near you. ## Scalable performance and pools You can define the amount of resources assigned. -- With single databases, each database is isolated from others and is portable. Each has its own guaranteed amount of compute, memory, and storage resources. The amount of the resources assigned to the database is dedicated to that database, and isn't shared with other databases in Azure. You can dynamically [scale single database resources](single-database-scale.md) up and down. The single database option provides different compute, memory, and storage resources for different needs. For example, you can get 1 to 80 vCores, or 32 GB to 4 TB. The [Hyperscale service tier](service-tier-hyperscale.md) for single databases enables you to scale to 100 TB, with fast backup and restore capabilities.
+- With single databases, each database is isolated from others and is portable. Each has its own guaranteed amount of compute, memory, and storage resources. The amount of the resources assigned to the database is dedicated to that database, and isn't shared with other databases in Azure. You can dynamically [scale single database resources](single-database-scale.md) up and down. The single database option provides different compute, memory, and storage resources for different needs. For example, you can get 1 to 128 vCores, or 32 GB to 4 TB. The [Hyperscale service tier](service-tier-hyperscale.md) for single databases enables you to scale to 100 TB, with fast backup and restore capabilities.
- With elastic pools, you can assign resources that are shared by all databases in the pool. You can create a new database, or move the existing single databases into a resource pool to maximize the use of resources and save money. This option also gives you the ability to dynamically [scale elastic pool resources](elastic-pool-scale.md) up and down.
-You can build your first app on a small, single database at a low cost per month in the general-purpose service tier. You can then change its service tier manually or programmatically at any time to the business-critical service tier, to meet the needs of your solution. You can adjust performance without downtime to your app or to your customers. Dynamic scalability enables your database to transparently respond to rapidly changing resource requirements. You pay for only the resources that you need when you need them.
+You can build your first app on a small, single database at a low cost per month in the [General Purpose](service-tier-general-purpose.md) service tier. You can then change its service tier manually or programmatically at any time to the [Business Critical](service-tier-business-critical.md) or [Hyperscale](service-tier-hyperscale.md) service tier, to meet the needs of your solution. You can adjust performance without downtime to your app or to your customers. Dynamic scalability enables your database to transparently respond to rapidly changing resource requirements. You pay for only the resources that you need when you need them.
*Dynamic scalability* is different from *autoscale*. Autoscale is when a service scales automatically based on criteria, whereas dynamic scalability allows for manual scaling without downtime. The single database option supports manual dynamic scalability, but not autoscale. For a more automatic experience, consider using elastic pools, which allow databases to share resources in a pool based on individual database needs. Another option is to use scripts that can help automate scalability for a single database. For an example, see [Use PowerShell to monitor and scale a single database](scripts/monitor-and-scale-database-powershell.md). ### Purchasing models SQL Database offers the following purchasing models:-- The [vCore-based purchasing model](service-tiers-vcore.md) lets you choose the number of vCores, the amount of memory, and the amount and speed of storage. The vCore-based purchasing model also allows you to use [Azure Hybrid Benefit for SQL Server](https://azure.microsoft.com/pricing/hybrid-benefit/) to gain cost savings. For more information about the Azure Hybrid Benefit, see the "Frequently asked questions" section later in this article.
+- The [vCore-based purchasing model](service-tiers-vcore.md) lets you choose the number of vCores, the amount of memory, and the amount and speed of storage. The vCore-based purchasing model also allows you to use [Azure Hybrid Benefit for SQL Server](https://azure.microsoft.com/pricing/hybrid-benefit/) to gain cost savings. For more information about the Azure Hybrid Benefit, see the [Frequently asked questions](#sql-database-frequently-asked-questions) section later in this article.
+ - The [DTU-based purchasing model](service-tiers-dtu.md) offers a blend of compute, memory, and I/O resources in three service tiers, to support light to heavy database workloads. Compute sizes within each tier provide a different mix of these resources, to which you can add additional storage resources.-- The [serverless model](serverless-tier-overview.md) automatically scales compute based on workload demand, and bills for the amount of compute used per second. The serverless compute tier also automatically pauses databases during inactive periods when only storage is billed, and automatically resumes databases when activity returns. ### Service tiers
-Azure SQL Database offers three service tiers that are designed for different types of applications:
-- [General Purpose/Standard](service-tier-general-purpose.md) service tier designed for common workloads. It offers budget-oriented balanced compute and storage options.-- [Business Critical/Premium](service-tier-business-critical.md) service tier designed for OLTP applications with high transaction rate and lowest-latency I/O. It offers the highest resilience to failures by using several isolated replicas.-- [Hyperscale](service-tier-hyperscale.md) service tier designed for very large OLTP database and the ability to autoscale storage and scale compute fluidly.
+Azure SQL Database offers three service tiers:
+- The [General Purpose/Standard](service-tier-general-purpose.md) service tier is designed for common workloads. It offers budget-oriented balanced compute and storage options.
+- The [Business Critical/Premium](service-tier-business-critical.md) service tier is designed for OLTP applications with high transaction rates and low latency I/O requirements. It offers the highest resilience to failures by using several isolated replicas.
+- The [Hyperscale](service-tier-hyperscale.md) service tier is designed for most business workloads. Hyperscale provides great flexibility and high performance with independently scalable compute and storage resources. It offers higher resilience to failures by allowing configuration of more than one isolated database replica.
+
+### Serverless compute
+
+The [serverless compute tier](serverless-tier-overview.md) is available within the vCore-based purchasing model when you select the [General Purpose service tier](service-tier-general-purpose.md).
+
+The serverless compute tier automatically scales compute based on workload demand, and bills for the amount of compute used per second. The serverless compute tier automatically pauses databases during inactive periods when only storage is billed, and automatically resumes databases when activity returns.
### Elastic pools to maximize resource utilization
Azure SQL Database provides advanced monitoring and troubleshooting features tha
- The built-in monitoring capabilities provided by the latest version of the SQL Server database engine. They enable you to find real-time performance insights. - PaaS monitoring capabilities provided by Azure that enable you to monitor and troubleshoot a large number of database instances.
-[Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store), a built-in SQL Server monitoring feature, records the performance of your queries in real time, and enables you to identify the potential performance issues and the top resource consumers. Automatic tuning and recommendations provide advice regarding the queries with the regressed performance and missing or duplicated indexes. Automatic tuning in SQL Database enables you to either manually apply the scripts that can fix the issues, or let SQL Database apply the fix. SQL Database can also test and verify that the fix provides some benefit, and retain or revert the change depending on the outcome. In addition to Query Store and automatic tuning capabilities, you can use standard [DMVs and XEvent](monitoring-with-dmvs.md) to monitor the workload performance.
+[Query Store](/sql/relational-databases/performance/best-practice-with-the-query-store), a built-in SQL Server monitoring feature, records the performance of your queries in real time, and enables you to identify the potential performance issues and the top resource consumers. [Automatic tuning and recommendations](automatic-tuning-overview.md) provide advice regarding the queries with the regressed performance and missing or duplicated indexes. Automatic tuning in SQL Database enables you to either manually apply the scripts that can fix the issues, or let SQL Database apply the fix. SQL Database can also test and verify that the fix provides some benefit, and retain or revert the change depending on the outcome. In addition to Query Store and automatic tuning capabilities, you can use standard [DMVs and XEvents](monitoring-with-dmvs.md) to monitor the workload performance.
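As a small illustration of DMV-based monitoring, the sketch below (in Go, matching the Go quickstart elsewhere in this collection) reads recent resource usage from the `sys.dm_db_resource_stats` DMV. The connection values are placeholders, and the query assumes your sign-in has permission to view database state.

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/denisenkom/go-mssqldb"
)

func main() {
	// Placeholder connection string; replace with your own values.
	db, err := sql.Open("sqlserver",
		"server=<your-server>.database.windows.net;user id=<your-username>;password=<your-password>;port=1433;database=<your-database>;")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read the five most recent resource usage samples recorded by the DMV.
	rows, err := db.QueryContext(context.Background(),
		"SELECT TOP 5 end_time, CAST(avg_cpu_percent AS float) FROM sys.dm_db_resource_stats ORDER BY end_time DESC;")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var endTime time.Time
		var avgCPU float64
		if err := rows.Scan(&endTime, &avgCPU); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%v  avg CPU %.2f%%\n", endTime, avgCPU)
	}
}
```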
Azure provides [built-in performance monitoring](performance-guidance.md) and [alerting](alerts-insights-configure-portal.md) tools, combined with performance ratings, that enable you to monitor the status of thousands of databases. Using these tools, you can quickly assess the impact of scaling up or down, based on your current or projected performance needs. Additionally, SQL Database can [emit metrics and resource logs](metrics-diagnostic-telemetry-logging-streaming-export-configure.md) for easier monitoring. You can configure SQL Database to store resource usage, workers and sessions, and connectivity into one of these Azure resources:
SQL Database provides a range of [built-in security and compliance features](../
> [!IMPORTANT] > Microsoft has certified Azure SQL Database (all deployment options) against a number of compliance standards. For more information, see the [Microsoft Azure Trust Center](https://gallery.technet.microsoft.com/Overview-of-Azure-c1be3942), where you can find the most current list of SQL Database compliance certifications.
-### Advance threat protection
+### <a name="advance-threat-protection"></a> Advanced threat protection
Microsoft Defender for SQL is a unified package for advanced SQL security capabilities. It includes functionality for managing your database vulnerabilities, and detecting anomalous activities that might indicate a threat to your database. It provides a single location for enabling and managing these capabilities.
SQL Database makes building and maintaining applications easier and more product
|[Azure Data Studio](/sql/azure-data-studio/)|A cross-platform database tool that runs on Windows, macOS, and Linux.| |[SQL Server Management Studio](/sql/ssms/download-sql-server-management-studio-ssms)|A free, downloadable client application for managing any SQL infrastructure, from SQL Server to SQL Database.| |[SQL Server Data Tools in Visual Studio](/sql/ssdt/download-sql-server-data-tools-ssdt)|A free, downloadable client application for developing SQL Server relational databases, databases in Azure SQL Database, Integration Services packages, Analysis Services data models, and Reporting Services reports.|
-|[Visual Studio Code](https://code.visualstudio.com/docs)|A free, downloadable, open-source code editor for Windows, macOS, and Linux. It supports extensions, including the [mssql extension](https://aka.ms/mssql-marketplace) for querying Microsoft SQL Server, Azure SQL Database, and Azure Azure Synapse Analytics.|
+|[Visual Studio Code](https://code.visualstudio.com/docs)|A free, downloadable, open-source code editor for Windows, macOS, and Linux. It supports extensions, including the [mssql extension](https://aka.ms/mssql-marketplace) for querying Microsoft SQL Server, Azure SQL Database, and Azure Synapse Analytics.|
SQL Database supports building applications with Python, Java, Node.js, PHP, Ruby, and .NET on macOS, Linux, and Windows. SQL Database supports the same [connection libraries](connect-query-content-reference-guide.md#libraries) as SQL Server.
SQL Database supports building applications with Python, Java, Node.js, PHP, Rub
### Can I control when patching downtime occurs?
-No. The impact of patching is generally not noticeable if you [employ retry logic](develop-overview.md#resiliency) in your app. For more information, see [Planning for Azure maintenance events in Azure SQL Database](planned-maintenance.md).
+The [maintenance window feature](maintenance-window.md) allows you to configure predictable maintenance window schedules for eligible databases in Azure SQL Database. [Maintenance window advance notifications](../database/advance-notifications.md) are available for databases configured to use a non-default [maintenance window](maintenance-window.md).
+### How do I plan for maintenance events?
+Patching is generally not noticeable if you [employ retry logic](develop-overview.md#resiliency) in your app. For more information, see [Planning for Azure maintenance events in Azure SQL Database](planned-maintenance.md).
## Engage with the SQL Server engineering team - [DBA Stack Exchange](https://dba.stackexchange.com/questions/tagged/sql-server): Ask database administration questions. - [Stack Overflow](https://stackoverflow.com/questions/tagged/sql-server): Ask development questions.-- [Microsoft Q&A question page](/answers/topics/azure-synapse-analytics.html): Ask technical questions.
+- [Microsoft Q&A question page](/answers/topics/azure-sql-database.html): Ask technical questions.
- [Feedback](https://aka.ms/sqlfeedback): Report bugs and request features. - [Reddit](https://www.reddit.com/r/SQLServer/): Discuss SQL Server.
azure-sql Auditing Configure https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/managed-instance/auditing-configure.md
f1_keywords:
- "mi.azure.sqlaudit.general.f1" Previously updated : "03/23/2022" Last updated : "03/25/2022" # Get started with Azure SQL Managed Instance auditing
For additional information:
Auditing of Microsoft Support operations for SQL Managed Instance allows you to audit Microsoft support engineers' operations when they need to access your server during a support request. The use of this capability, along with your auditing, enables more transparency into your workforce and allows for anomaly detection, trend visualization, and data loss prevention.
-To enable auditing of Microsoft Support operations navigate to create audit under audit in your SQL MI Instance, and switch Enable Auditing of Microsoft support operations to ON
+To enable auditing of Microsoft Support operations, navigate to **Create Audit** under **Security** > **Audit** in your SQL Managed Instance, and select **Microsoft support operations**.
![create audit icon](./media/auditing-configure/support-operations.png)
azure-sql Subnet Service Aided Configuration Enable https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/managed-instance/subnet-service-aided-configuration-enable.md
ms.devlang:
Previously updated : 03/12/2020 Last updated : 03/25/2022 # Enabling service-aided subnet configuration for Azure SQL Managed Instance [!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)]
Automatically configured network security groups and route table rules are visib
Service-aided configuration is enabled automatically once you turn on [subnet-delegation](../../virtual-network/subnet-delegation-overview.md) for `Microsoft.Sql/managedInstances` resource provider. > [!IMPORTANT]
-> Once subnet-delegation is turned on you could not turn it off until you remove last virtual cluster from the subnet. For more details on how to delete virtual cluster see the following [article](virtual-cluster-delete.md#delete-a-virtual-cluster-from-the-azure-portal).
+> Once subnet-delegation is turned on, you cannot turn it off until the last virtual cluster is removed from the subnet. For more details on the virtual cluster lifetime, see the following [article](virtual-cluster-delete.md).
> [!NOTE] > As service-aided subnet configuration is an essential feature for maintaining the SLA, starting May 1st, 2020, it won't be possible to deploy managed instances in subnets that are not delegated to the managed instance resource provider. On July 1st, 2020, all subnets containing managed instances will be automatically delegated to the managed instance resource provider.
azure-sql Virtual Cluster Delete https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/azure-sql/managed-instance/virtual-cluster-delete.md
Previously updated : 08/20/2021 Last updated : 03/25/2022 # Delete a subnet after deleting an Azure SQL Managed Instance [!INCLUDE[appliesto-sqlmi](../includes/appliesto-sqlmi.md)]
-This article provides guidelines on how to manually delete a subnet after deleting the last Azure SQL Managed Instance residing in it.
+This article provides guidelines on how to manually delete a subnet after deleting the last Azure SQL Managed Instance residing in it. You can [delete a virtual network subnet](../../virtual-network/virtual-network-manage-subnet.md#delete-a-subnet) only if there are no resources in the subnet.
-SQL Managed Instances are deployed into [virtual clusters](connectivity-architecture-overview.md#virtual-cluster-connectivity-architecture). Each virtual cluster is associated with a subnet and deployed together with first instance creation. In the same way, a virtual cluster is automatically removed together with last instance deletion leaving the subnet empty and ready for removal. There is no need for any manual action on the virtual cluster in order to release the subnet. Once the last virtual cluster is deleted, you can go and delete the subnet
+SQL Managed Instances are deployed into [virtual clusters](connectivity-architecture-overview.md#virtual-cluster-connectivity-architecture). Each virtual cluster is associated with a subnet and **automatically deployed** together with the first instance creation. In the same way, a virtual cluster is **automatically removed** together with the last instance deletion, leaving the subnet empty and ready for removal.
-There are rare circumstances in which create operation can fail and result with deployed empty virtual cluster. Additionally, as instance creation [can be canceled](management-operations-cancel.md), it is possible for a virtual cluster to be deployed with instances residing inside, in a failed state. Virtual cluster removal will automatically be initiated in these situations and removed in the background.
+>[!IMPORTANT]
+>There is no need for any manual action on the virtual cluster in order to release the subnet. Once the last virtual cluster is deleted, you can then delete the subnet.
-> [!NOTE]
-> There are no charges for keeping an empty virtual cluster or instances that have failed to create.
+There are rare circumstances in which the create operation can fail and result in a deployed, empty virtual cluster. Additionally, because instance creation [can be canceled](management-operations-cancel.md), it is possible for a virtual cluster to be deployed with instances inside it in a failed-to-deploy state. In these situations, virtual cluster removal is automatically initiated and completed in the background.
> [!IMPORTANT]
-> - The virtual cluster should contain no SQL Managed Instances for the deletion to be successful. This does not include instances that have failed to create.
+> - There are no charges for keeping an empty virtual cluster or instances that have failed to create.
> - Deletion of a virtual cluster is a long-running operation lasting for about 1.5 hours (see [SQL Managed Instance management operations](management-operations-overview.md) for up-to-date virtual cluster delete times). The virtual cluster will still be visible in the portal until this process is completed. > - Only one delete operation can be run on the virtual cluster. All subsequent customer-initiated delete requests will result in an error because a delete operation is already in progress.
-## Delete a virtual cluster from the Azure portal
+## Delete a virtual cluster from the Azure portal [DEPRECATED]
> [!IMPORTANT] > Starting September 1, 2021, all virtual clusters are automatically removed when the last instance in the cluster has been deleted. Manual removal of the virtual cluster is no longer required.
Azure portal notifications will show you a confirmation that the request to dele
> [!TIP] > If there are no SQL Managed Instances shown in the virtual cluster, and you are unable to delete the virtual cluster, ensure that you do not have an ongoing instance deployment in progress. This includes started and canceled deployments that are still in progress. This is because these operations will still use the virtual cluster, locking it from deletion. Review the **Deployments** tab of the resource group where the instance was deployed to see any deployments in progress. In this case, wait for the deployment to complete, then delete the SQL Managed Instance. The virtual cluster will be synchronously deleted as part of the instance removal.
-## Delete a virtual cluster by using the API
+## Delete a virtual cluster by using the API [DEPRECATED]
+
+> [!IMPORTANT]
+> Starting September 1, 2021, all virtual clusters are automatically removed when the last instance in the cluster has been deleted. Manual removal of the virtual cluster is no longer required.
To delete a virtual cluster through the API, use the URI parameters specified in the [virtual clusters delete method](/rest/api/sql/virtualclusters/delete).
backup Backup Azure Vms Enhanced Policy https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/backup/backup-azure-vms-enhanced-policy.md
Title: Back up Azure VMs with Enhanced policy (in preview) description: Learn how to configure Enhanced policy to back up VMs. Previously updated : 02/18/2022 Last updated : 03/25/2022
Follow these steps:
6. Click **Create**. >[!Note]
+>- Support for Enhanced policy is available in all Azure public regions, but not in US Sovereign regions.
>- We support Enhanced policy configuration through [Recovery Services vault](./backup-azure-arm-vms-prepare.md) and [VM Manage blade](./backup-during-vm-creation.md#start-a-backup-after-creating-the-vm) only. Configuration through Backup center is currently not supported. >- For hourly backups, the last backup of the day is transferred to the vault. If a backup fails, the first backup of the next day is transferred to the vault. >- Enhanced policy is only available for unprotected VMs that are new to Azure Backup. Note that Azure VMs that are protected with an existing policy can't be moved to Enhanced policy.
batch Batch Linux Nodes https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/batch/batch-linux-nodes.md
Not all Marketplace images are compatible with the currently available Batch nod
### Node agent SKU
-The [Batch node agent](https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md) is a program that runs on each node in the pool and provides the command-and-control interface between the node and the Batch service. There are different implementations of the node agent, known as SKUs, for different operating systems. Essentially, when you create a Virtual Machine Configuration, you first specify the virtual machine image reference, and then you specify the node agent to install on the image. Typically, each node agent SKU is compatible with multiple virtual machine images. Here are a few examples of node agent SKUs:
--- batch.node.ubuntu 18.04-- batch.node.centos 7-- batch.node.windows amd64
+The [Batch node agent](https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md) is a program that runs on each node in the pool and provides the command-and-control interface between the node and the Batch service. There are different implementations of the node agent, known as SKUs, for different operating systems. Essentially, when you create a Virtual Machine Configuration, you first specify the virtual machine image reference, and then you specify the node agent to install on the image. Typically, each node agent SKU is compatible with multiple virtual machine images. To view supported Marketplace VM images with their corresponding node agent SKUs, you can refer to [Account - List Supported Images - REST API (Azure Batch Service) | Microsoft Docs](/rest/api/batchservice/account/list-supported-images).
## Create a Linux pool: Batch Python
batch Quick Run Python https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/batch/quick-run-python.md
After completing this quickstart, you'll understand key concepts of the Batch se
- A Batch account and a linked Azure Storage account. To create these accounts, see the Batch quickstarts using the [Azure portal](quick-create-portal.md) or [Azure CLI](quick-create-cli.md). -- [Python](https://python.org/downloads) version 2.7 or 3.6 or later, including the [pip](https://pip.pypa.io/en/stable/installing/) package manager.
+- [Python](https://python.org/downloads) version 3.6 or later, including the [pip](https://pip.pypa.io/en/stable/installing/) package manager.
## Sign in to Azure
pip install -r requirements.txt
Open the file `config.py`. Update the Batch and storage account credential strings with the values you obtained for your accounts. For example: ```Python
-_BATCH_ACCOUNT_NAME = 'mybatchaccount'
-_BATCH_ACCOUNT_KEY = 'xxxxxxxxxxxxxxxxE+yXrRvJAqT9BlXwwo1CwF+SwAYOxxxxxxxxxxxxxxxx43pXi/gdiATkvbpLRl3x14pcEQ=='
-_BATCH_ACCOUNT_URL = 'https://mybatchaccount.mybatchregion.batch.azure.com'
-_STORAGE_ACCOUNT_NAME = 'mystorageaccount'
-_STORAGE_ACCOUNT_KEY = 'xxxxxxxxxxxxxxxxy4/xxxxxxxxxxxxxxxxfwpbIC5aAWA8wDu+AFXZB827Mt9lybZB1nUcQbQiUrkPtilK5BQ=='
+BATCH_ACCOUNT_NAME = 'mybatchaccount'
+BATCH_ACCOUNT_KEY = 'xxxxxxxxxxxxxxxxE+yXrRvJAqT9BlXwwo1CwF+SwAYOxxxxxxxxxxxxxxxx43pXi/gdiATkvbpLRl3x14pcEQ=='
+BATCH_ACCOUNT_URL = 'https://mybatchaccount.mybatchregion.batch.azure.com'
+STORAGE_ACCOUNT_NAME = 'mystorageaccount'
+STORAGE_ACCOUNT_KEY = 'xxxxxxxxxxxxxxxxy4/xxxxxxxxxxxxxxxxfwpbIC5aAWA8wDu+AFXZB827Mt9lybZB1nUcQbQiUrkPtilK5BQ=='
``` ## Run the app
To interact with a storage account, the app creates a [BlobServiceClient](/pytho
```python blob_service_client = BlobServiceClient(
- account_url="https://{}.{}/".format(
- config._STORAGE_ACCOUNT_NAME,
- config._STORAGE_ACCOUNT_DOMAIN
- ),
- credential=config._STORAGE_ACCOUNT_KEY
+ account_url=f"https://{config.STORAGE_ACCOUNT_NAME}.{config.STORAGE_ACCOUNT_DOMAIN}/",
+ credential=config.STORAGE_ACCOUNT_KEY
) ```
input_files = [
The app creates a [BatchServiceClient](/python/api/azure.batch.batchserviceclient) object to create and manage pools, jobs, and tasks in the Batch service. The Batch client in the sample uses shared key authentication. Batch also supports Azure Active Directory authentication. ```python
-credentials = SharedKeyCredentials(config._BATCH_ACCOUNT_NAME,
- config._BATCH_ACCOUNT_KEY)
+credentials = SharedKeyCredentials(config.BATCH_ACCOUNT_NAME,
+ config.BATCH_ACCOUNT_KEY)
batch_client = BatchServiceClient( credentials,
- batch_url=config._BATCH_ACCOUNT_URL)
+ batch_url=config.BATCH_ACCOUNT_URL)
``` ### Create a pool of compute nodes To create a Batch pool, the app uses the [PoolAddParameter](/python/api/azure-batch/azure.batch.models.pooladdparameter) class to set the number of nodes, VM size, and a pool configuration. Here, a [VirtualMachineConfiguration](/python/api/azure-batch/azure.batch.models.virtualmachineconfiguration) object specifies an [ImageReference](/python/api/azure-batch/azure.batch.models.imagereference) to an Ubuntu Server 20.04 LTS image published in the Azure Marketplace. Batch supports a wide range of Linux and Windows Server images in the Azure Marketplace, as well as custom VM images.
-The number of nodes (`_POOL_NODE_COUNT`) and VM size (`_POOL_VM_SIZE`) are defined constants. The sample by default creates a pool of 2 size *Standard_DS1_v2* nodes. The size suggested offers a good balance of performance versus cost for this quick example.
+The number of nodes (`POOL_NODE_COUNT`) and VM size (`POOL_VM_SIZE`) are defined constants. By default, the sample creates a pool of two *Standard_DS1_v2* nodes. This size offers a good balance of performance versus cost for this quick example.
The [pool.add](/python/api/azure-batch/azure.batch.operations.pooloperations) method submits the pool to the Batch service.
new_pool = batchmodels.PoolAddParameter(
version="latest" ), node_agent_sku_id="batch.node.ubuntu 20.04"),
- vm_size=config._POOL_VM_SIZE,
- target_dedicated_nodes=config._POOL_NODE_COUNT
+ vm_size=config.POOL_VM_SIZE,
+ target_dedicated_nodes=config.POOL_NODE_COUNT
) batch_service_client.pool.add(new_pool) ```
The app creates a list of task objects using the [TaskAddParameter](/python/api/
Then, the app adds tasks to the job with the [task.add_collection](/python/api/azure-batch/azure.batch.operations.taskoperations) method, which queues them to run on the compute nodes. ```python
-tasks = list()
+tasks = []
-for idx, input_file in enumerate(input_files):
- command = "/bin/bash -c \"cat {}\"".format(input_file.file_path)
+for idx, input_file in enumerate(resource_input_files):
+ command = f"/bin/bash -c \"cat {input_file.file_path}\""
tasks.append(batchmodels.TaskAddParameter(
- id='Task{}'.format(idx),
+ id=f'Task{idx}',
command_line=command, resource_files=[input_file] ) )+ batch_service_client.task.add_collection(job_id, tasks) ```
tasks = batch_service_client.task.list(job_id)
for task in tasks: node_id = batch_service_client.task.get(job_id, task.id).node_info.node_id
- print("Task: {}".format(task.id))
- print("Node: {}".format(node_id))
+ print(f"Task: {task.id}")
+ print(f"Node: {node_id}")
stream = batch_service_client.file.get_from_task(
- job_id, task.id, config._STANDARD_OUT_FILE_NAME)
+ job_id, task.id, config.STANDARD_OUT_FILE_NAME)
file_text = _read_stream_as_string( stream,
- encoding)
+ text_encoding)
+
+ if text_encoding is None:
+ text_encoding = DEFAULT_ENCODING
+
+ sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = text_encoding)
+ sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = text_encoding)
+ print("Standard output:") print(file_text) ```
cognitive-services Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/Bing-News-Search/go.md
This quickstart uses the Go language to call the Bing News Search API. The results include names and URLs of news sources identified by the query string. ## Prerequisites
-* Install the [Go binaries](https://golang.org/dl/).
+* Install the [Go binaries](https://go.dev/dl/).
* Install the go-spew library to use a deep pretty printer to display the results. Use this command to install the library: `$ go get -u https://github.com/davecgh/go-spew`. [!INCLUDE [cognitive-services-bing-news-search-signup-requirements](../../../includes/cognitive-services-bing-news-search-signup-requirements.md)]
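As a hedged illustration of what the go-spew deep pretty printer does, the short sketch below dumps a hypothetical result struct; the `newsResult` type and its values are made up for the example and are not the Bing News Search response shape.

```go
package main

import "github.com/davecgh/go-spew/spew"

// newsResult is a hypothetical struct used only to demonstrate spew's output.
type newsResult struct {
	Name string
	URL  string
}

func main() {
	results := []newsResult{
		{Name: "Example headline", URL: "https://example.com/story"},
	}
	// Dump prints the value with full type and field detail.
	spew.Dump(results)
}
```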
cognitive-services Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/Bing-Web-Search/quickstarts/go.md
Use this quickstart to make your first call to the Bing Web Search API. This Go
## Prerequisites Here are a few things that you'll need before running this quickstart:
-* [Go binaries](https://golang.org/dl/)
+* [Go binaries](https://go.dev/dl/)
* A subscription key [!INCLUDE [bing-web-search-quickstart-signup](../../../../includes/bing-web-search-quickstart-signup.md)]
cognitive-services Spatial Analysis Container https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/Computer-vision/spatial-analysis-container.md
Azure Stack Edge is a Hardware-as-a-Service solution and an AI-enabled edge comp
* 4 GB system RAM * 4 GB of GPU RAM * 8 core CPU
-* 1 NVIDIA Tesla T4 GPU
+* 1 NVIDIA CUDA compute-capable device with compute capability 6.0 or higher (for example, NVIDIA Tesla T4, 1080Ti, or 2080Ti)
* 20 GB of HDD space #### Recommended hardware
cognitive-services Multi Turn https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/QnAMaker/How-To/multi-turn.md
QnA Maker supports version control by including multi-turn conversation steps in
## Next steps
-* Learn more about contextual conversations from this [dialog sample](https://github.com/microsoft/BotBuilder-Samples/blob/master/samples/csharp_dotnetcore/adaptive-dialog/07.qnamaker/QnAMaker.csproj) or learn more about [conceptual bot design for multi-turn conversations](/azure/bot-service/bot-builder-conversations).
+* Learn more about contextual conversations from this [dialog sample](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/csharp_dotnetcore/11.qnamaker) or learn more about [conceptual bot design for multi-turn conversations](/azure/bot-service/bot-builder-conversations).
cognitive-services How To Custom Speech Train Model https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/Speech-Service/how-to-custom-speech-train-model.md
In this article, you'll learn how to train and deploy Custom Speech models. Training a speech-to-text model can improve recognition accuracy for the Microsoft baseline model. You use human-labeled transcriptions and related text to train a model. And you use these datasets, along with previously uploaded audio data, to refine and train the speech-to-text model.
+> [!NOTE]
+> You pay to use Custom Speech models, but you are not charged for training a model.
+ ## Use training to resolve accuracy problems If you're encountering recognition problems with a base model, you can use human-labeled transcripts and related data to train a custom model and help improve accuracy. To determine which dataset to use to address your problems, refer to the following table:
cognitive-services Quickstart Translator https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/Translator/quickstart-translator.md
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/translate?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("from", "en")
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/translate?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("to", "de")
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/detect?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() u.RawQuery = q.Encode()
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/translate?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("to", "th")
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/transliterate?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("language", "th")
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/translate?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("to", "es")
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/breaksentence?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() u.RawQuery = q.Encode()
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/dictionary/lookup?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("from", "en")
func main() {
endpoint := "https://api.cognitive.microsofttranslator.com/" uri := endpoint + "/dictionary/examples?api-version=3.0"
- // Build the request URL. See: https://golang.org/pkg/net/url/#example_URL_Parse
+ // Build the request URL. See: https://go.dev/pkg/net/url/#example_URL_Parse
u, _ := url.Parse(uri) q := u.Query() q.Add("from", "en")
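The excerpts above repeat the same URL-building pattern for each Translator route. As a consolidated sketch (no request is actually sent, and the parameters simply mirror the excerpts), the pattern looks like this:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	endpoint := "https://api.cognitive.microsofttranslator.com/"
	uri := endpoint + "/translate?api-version=3.0"

	// Build the request URL and add query parameters, as in the snippets above.
	u, _ := url.Parse(uri)
	q := u.Query()
	q.Add("from", "en")
	q.Add("to", "de")
	u.RawQuery = q.Encode()

	fmt.Println(u.String())
}
```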
cognitive-services Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cognitive-services/bing-visual-search/quickstarts/go.md
Use this quickstart to make your first call to the Bing Visual Search API using
## Prerequisites
-* Install the [Go binaries](https://golang.org/dl/).
+* Install the [Go binaries](https://go.dev/dl/).
* Install the go-spew deep pretty printer, which is used to display results. To install go-spew, use the `$ go get -u https://github.com/davecgh/go-spew` command. [!INCLUDE [cognitive-services-bing-visual-search-signup-requirements](../../../../includes/cognitive-services-bing-visual-search-signup-requirements.md)]
communication-services Insights https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/analytics/insights.md
The **Volume** tab under the **Voice and video** modality displays the number of
:::image type="content" source="media\workbooks\voice-and-video-volume.png" alt-text="Voice and video volume":::
-The **Volume** tab contains a **Grouping** parameter, which helps visualize the number of calls and participants segmented by either Call type (P2P vs. Group calls) and Interop Calls (pure Azure Communication Services (ACS) vs. Teams Interop):
+The **Volume** tab contains a **Grouping** parameter, which helps visualize the number of calls and participants segmented by either Call type (P2P vs. Group calls) and Interop Calls (pure Azure Communication Services vs. Teams Interop):
:::image type="content" source="media\workbooks\voice-and-video-volume-grouping.png" alt-text="Voice and video volume grouping":::
The **Chat** tab displays the data for all chat-related operations and their res
:::image type="content" source="media\workbooks\chat.png" alt-text="Chat tab":::
-The **SMS** tab displays the operations and results for SMS usage through an ACS resource (we currently don't have any data for this modality):
+The **SMS** tab displays the operations and results for SMS usage through an Azure Communication Services resource (we currently don't have any data for this modality):
:::image type="content" source="media\workbooks\sms.png" alt-text="SMS tab":::
communication-services Authentication https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/authentication.md
Access key authentication is suitable for service applications running in a trus
Since the access key is part of the connection string of your resource, authentication with a connection string is equivalent to authentication with an access key.
-If you wish to call ACS' APIs manually using an access key, then you will need to sign the request. Signing the request is explained, in detail, within a [tutorial](../tutorials/hmac-header-tutorial.md).
+If you wish to call Azure Communication Services' APIs manually using an access key, then you will need to sign the request. Signing the request is explained, in detail, within a [tutorial](../tutorials/hmac-header-tutorial.md).
### Azure AD authentication
communication-services Best Practices https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/best-practices.md
# Best practices: Azure Communication Services calling SDKs
-This article provides information about best practices related to the Azure Communication Services (ACS) calling SDKs.
+This article provides information about best practices related to the Azure Communication Services calling SDKs.
-## ACS web JavaScript SDK best practices
+## Azure Communication Services web JavaScript SDK best practices
This section provides information about best practices associated with the Azure Communication Services JavaScript voice and video calling SDK. ## JavaScript voice and video calling SDK
-### Plug-in microphone or enable microphone from device manager when ACS call in progress
+### Plug-in microphone or enable microphone from device manager when Azure Communication Services call in progress
When there is no microphone available at the beginning of a call, and then a microphone becomes available, the "noMicrophoneDevicesEnumerated" call diagnostic event will be raised. When this happens, your application should invoke `askDevicePermission` to obtain user consent to enumerate devices. The user will then be able to mute/unmute the microphone.
Your application should invoke `call.hangup` when the `onbeforeunload` event is
Your application should not connect to calls from multiple browser tabs simultaneously as this can cause undefined behavior due to resource allocation for microphone and camera on the device. Developers are encouraged to always hang up calls when completed in the background before starting a new one. ### Handle OS muting call when phone call comes in.
-While on an ACS call (for both iOS and Android) if a phone call comes in or Voice assistant is activated, the OS will automatically mute the user's microphone and camera. On Android, the call automatically unmutes and video restarts after the phone call ends. On iOS, it requires user action to "unmute" and "start video" again. You can listen for the notification that the microphone was muted unexpectedly with the quality event of `microphoneMuteUnexpectedly`. Do note in order to be able to rejoin a call properly you will need to use SDK 1.2.3-beta.1 or higher.
+While on an Azure Communication Services call (for both iOS and Android), if a phone call comes in or a voice assistant is activated, the OS automatically mutes the user's microphone and camera. On Android, the call automatically unmutes and video restarts after the phone call ends. On iOS, it requires user action to "unmute" and "start video" again. You can listen for the notification that the microphone was muted unexpectedly with the quality event of `microphoneMuteUnexpectedly`. Note that to rejoin a call properly, you will need to use SDK 1.2.3-beta.1 or higher.
```javascript const latestMediaDiagnostic = call.api(SDK.Features.Diagnostics).media.getLatest();
communication-services Bring Your Own Storage https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/bring-your-own-storage.md
[!INCLUDE [Private Preview Disclaimer](../includes/private-preview-include-section.md)]
-In many applications, end-users may want to store their Call Recording files long-term. Some of the common scenarios are compliance, quality assurance, assessment, post-call analysis, training, and coaching. Now with the BYOS (bring your own storage) being available, end-users will have an option to store their files long-term and manage the files in a way they need. The end-user will be responsible for legal regulations about storing the data. BYOS simplifies downloading of the files from Azure Communication Services (ACS) and minimizes the number of support requests if the customer was unable to download the recording in 48 hours. Data will be transferred securely from Microsoft Azure blob storage to a customer Azure blob storage.
+In many applications, end users may want to store their Call Recording files long-term. Some of the common scenarios are compliance, quality assurance, assessment, post-call analysis, training, and coaching. Now that BYOS (bring your own storage) is available, end users have the option to store their files long-term and manage them in the way they need. The end user is responsible for meeting legal regulations about storing the data. BYOS simplifies downloading the files from Azure Communication Services and minimizes the number of support requests if a customer is unable to download a recording within 48 hours. Data is transferred securely from Microsoft Azure Blob Storage to a customer's Azure Blob Storage.
Here are a few examples: - Contact Center Recording - Compliance Recording Scenario
BYOS can be easily integrated into any application regardless of the programming
![Bring your own storage concept diagram](../media/byos-diagramm.png) 1. Contoso enables MI (managed identities) on an Azure Storage account.
-2. Contoso creates ACS (azure communication services) resource.
+2. Contoso creates Azure Communication Services resource.
![Bring your own storage resource page](../media/byos-link-storage.png)
-3. Contoso enables BYOS on the ACS resource and specifies the URL to link with the storage.
+3. Contoso enables BYOS on the Azure Communication Services resource and specifies the URL to link with the storage.
4. After the resource has been created Contoso will see linked storage and will be able to change settings later in time ![Bring your own storage add storage page](../media/byos-add-storage.png)
-5. If Contoso has built an application with Call Recording, they can record a meeting. Once the recording file is available, Contoso will receive an event from ACS that a file is copied over to their storage.
+5. If Contoso has built an application with Call Recording, they can record a meeting. Once the recording file is available, Contoso will receive an event from Azure Communication Services that a file is copied over to their storage.
6. After the notification has been received, Contoso will see the file located in the storage they have specified.
-7. Contoso has successfully linked their storage with ACS!
-
+7. Contoso has successfully linked their storage with Azure Communication Services!
![Bring your own storage success page](../media/byos-storage-created.png)
communication-services Call Flows https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/call-flows.md
The section below gives an overview of the call flows in Azure Communication Ser
When you establish a peer-to-peer or group call, two protocols are used behind the scenes - HTTP (REST) for signaling and SRTP for media.
-Signaling between the SDKs or between SDKs and Communication Services Signaling Controllers is handled with HTTP REST (TLS). The ACS uses TLS 1.2. For Real-Time Media Traffic (RTP), the User Datagram Protocol (UDP) is preferred. If the use of UDP is prevented by your firewall, the SDK will use the Transmission Control Protocol (TCP) for media.
+Signaling between the SDKs or between SDKs and Communication Services Signaling Controllers is handled with HTTP REST (TLS). Azure Communication Services uses TLS 1.2. For Real-Time Media Traffic (RTP), the User Datagram Protocol (UDP) is preferred. If the use of UDP is prevented by your firewall, the SDK will use the Transmission Control Protocol (TCP) for media.
Let's review the signaling and media protocols in various scenarios.
For Alice it will be the NAT of the coffee shop and for Bob it will be the NAT o
### Case 3: VoIP where neither a direct nor NAT connection is possible
-If one or both client devices are behind a symmetric NAT, a separate cloud service to relay the media between the two SDKs is required. This service is called TURN (Traversal Using Relays around NAT) and is also provided by the Communication Services. The Communication Services Calling SDK automatically uses TURN services based on detected network conditions. Use of Microsoft's TURN service is charged separately.
+If one or both client devices are behind a symmetric NAT, a separate cloud service to relay the media between the two SDKs is required. This service is called TURN (Traversal Using Relays around NAT) and is also provided by the Communication Services. The Communication Services Calling SDK automatically uses TURN services based on detected network conditions.
:::image type="content" source="./media/call-flows/about-voice-case-3.png" alt-text="Diagram showing a VOIP call which utilizes a TURN connection.":::
communication-services Detailed Call Flows https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/detailed-call-flows.md
The audio/video/screen sharing (VBSS) service is part of Azure Communication Ser
Internal clients will obtain local, reflexive, and relay candidates in the same manner as described for one-to-one calls. The clients will send these candidates to the service in an invite. The service does not use a relay since it has a publicly reachable IP address, so it responds with its local IP address candidate. The client and the service will check connectivity in the same manner described for one-to-one calls. *Figure 9 – Communication Services Group Calls*
communication-services Calling Chat https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/interop/calling-chat.md
> > For support, questions or to provide feedback or report issues, please use the [Teams Interop ad hoc calling and chat channel](https://teams.microsoft.com/l/channel/19%3abfc7d5e0b883455e80c9509e60f908fb%40thread.tacv2/Teams%2520Interop%2520ad%2520hoc%2520calling%2520and%2520chat?groupId=d78f76f3-4229-4262-abfb-172587b7a6bb&tenantId=72f988bf-86f1-41af-91ab-2d7cd011db47). You must be a member of the Azure Communication Service TAP team.
-As part of this preview, the Azure Communication Services SDKs can be used to build applications that enable bring your own identity (BYOI) users to start 1:1 calls or 1:n chats with Teams users. [Standard ACS pricing](https://azure.microsoft.com/pricing/details/communication-services/) applies to these users, but there's no extra fee for the interoperability capability itself.
+As part of this preview, the Azure Communication Services SDKs can be used to build applications that enable bring your own identity (BYOI) users to start 1:1 calls or 1:n chats with Teams users. [Standard Azure Communication Services pricing](https://azure.microsoft.com/pricing/details/communication-services/) applies to these users, but there's no extra fee for the interoperability capability itself.
With the Calling SDK, a Communication Services user or endpoint can start a 1:1
Calling another Communication Services endpoint using [communicationUserId](/javascript/api/@azure/communication-common/communicationuseridentifier#communicationUserId): ```js
-const acsCallee = { communicationUserId: '<ACS User ID>' }
+const acsCallee = { communicationUserId: '<Azure Communication Services User ID>' }
const call = callAgent.startCall([acsCallee]); ```
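Under this preview, the same `callAgent` can also start a 1:1 call to a Teams user; a minimal sketch, assuming the Teams user's Azure AD object ID is available to your application (the placeholder below is illustrative):
```js
// Sketch: starting a 1:1 call to a Teams user instead of a Communication Services user.
// '<TEAMS_USER_AAD_OBJECT_ID>' is a placeholder for the Teams user's Azure AD object ID.
const teamsCallee = { microsoftTeamsUserId: '<TEAMS_USER_AAD_OBJECT_ID>' };
const teamsCall = callAgent.startCall([teamsCallee]);
```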
communication-services Teams User Calling https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/interop/teams-user-calling.md
Title: Azure Communication Services Teams identity overview
-description: Provides an overview of the support for Teams identity in ACS Calling SDK.
+description: Provides an overview of the support for Teams identity in Azure Communication Services Calling SDK.
Key features of the Calling SDK:
- **Addressing** - Azure Communication Services is using [Azure Active Directory user identifier](/powershell/module/azuread/get-azureaduser) to address communication endpoints. Clients use Azure Active Directory identities to authenticate to the service and communicate with each other. These identities are used in Calling APIs that provide clients visibility into who is connected to a call (the roster). And are also used in [Microsoft Graph API](/graph/api/user-get). - **Encryption** - The Calling SDK encrypts traffic and prevents tampering on the wire. - **Device Management and Media** - The Calling SDK provides facilities for binding to audio and video devices, encodes content for efficient transmission over the communications data plane, and renders content to output devices and views that you specify. APIs are also provided for screen and application sharing.-- **PSTN** - The Calling SDK can receive and initiate voice calls with the traditional publically switched telephony system, [using phone numbers you acquire in the Teams Admin Portal](/microsoftteams/pstn-connectivity).
+- **PSTN** - The Calling SDK can receive and initiate voice calls with the traditional publicly switched telephony system, [using phone numbers you acquire in the Teams Admin Portal](/microsoftteams/pstn-connectivity).
- **Teams Meetings** - The Calling SDK can [join Teams meetings](../../quickstarts/voice-video-calling/get-started-teams-interop.md) and interact with the Teams voice and video data plane. - **Notifications** - The Calling SDK provides APIs allowing clients to be notified of an incoming call. In situations where your app is not running in the foreground, patterns are available to [fire pop-up notifications](../notifications.md) ("toasts") to inform users of an incoming call.
-## Detailed ACS capabilities
+## Detailed Azure Communication Services capabilities
The following list presents the set of features, which are currently available in the Azure Communication Services Calling SDK for JavaScript.
communication-services Join Teams Meeting https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/join-teams-meeting.md
> [!IMPORTANT] > BYOI interoperability is now generally available to all Communication Services applications and Teams organizations.
-Azure Communication Services can be used to build applications that enable users to join and participate in Teams meetings. [Standard ACS pricing](https://azure.microsoft.com/pricing/details/communication-services/) applies to these users, but there's no additional fee for the interoperability capability itself. With the bring your own identity (BYOI) model, you control user authentication and users of your applications don't need Teams licenses to join Teams meetings. This is ideal for applications that enable licensed Teams users and external users using a custom application to join into a virtual consultation experience. For example, healthcare providers using Teams can conduct teleheath virtual visits with their patients who use a custom application.
+Azure Communication Services can be used to build applications that enable users to join and participate in Teams meetings. [Standard Azure Communication Services pricing](https://azure.microsoft.com/pricing/details/communication-services/) applies to these users, but there's no additional fee for the interoperability capability itself. With the bring your own identity (BYOI) model, you control user authentication and users of your applications don't need Teams licenses to join Teams meetings. This is ideal for applications that enable licensed Teams users and external users using a custom application to join into a virtual consultation experience. For example, healthcare providers using Teams can conduct telehealth virtual visits with their patients who use a custom application.
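As a rough illustration of the BYOI flow, a custom application can join a Teams meeting with the JavaScript Calling SDK by passing a meeting link locator; a minimal sketch, assuming a `callAgent` created as in the Teams interop quickstart and a placeholder meeting link:
```javascript
// Sketch: a BYOI user joining a Teams meeting. '<TEAMS_MEETING_LINK>' is a placeholder
// for the join URL shared by the meeting organizer.
const meetingCall = callAgent.join(
    { meetingLink: '<TEAMS_MEETING_LINK>' },
    { audioOptions: { muted: true } } // optional: join muted
);
```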
It's also possible to use Teams identities with the Azure Communication Services SDKs. More information is available [here](./teams-interop.md).
communication-services Known Issues https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/known-issues.md
Incoming video streams won't stop rendering if the user is on iOS 15.2+ and is u
### iOS with Safari crashes and refreshes the page if a user tries to switch from front camera to back camera.
-ACS Calling SDK version 1.2.3-beta.1 introduced a bug that affects all of the calls made from iOS Safari. The problem occurs when a user tries to switch the camera video stream from front to back. Switching camera results in Safari browser to crash and reload the page.
+Azure Communication Services Calling SDK version 1.2.3-beta.1 introduced a bug that affects all of the calls made from iOS Safari. The problem occurs when a user tries to switch the camera video stream from front to back. Switching the camera causes the Safari browser to crash and reload the page.
-This issue is fixed in ACS Calling SDK version 1.3.1-beta.1 +
+This issue is fixed in Azure Communication Services Calling SDK version 1.3.1-beta.1 +
* iOS Safari version: 15.1
communication-services Logging And Diagnostics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/logging-and-diagnostics.md
Next, select the archive target you want. Currently, we support storage accounts
New settings take effect in about ten minutes. Logs will begin appearing in the configured archival target within the Logs pane of your Communication Services resource. For more information about configuring diagnostics, see the overview of [Azure resource logs](../../azure-monitor/essentials/platform-logs-overview.md).
communication-services Metrics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/metrics.md
Both Chat and SMS API request metrics contain three dimensions that you can use
More information on supported aggregation types and time series aggregations can be found in [Advanced features of Azure Metrics Explorer](../../azure-monitor/essentials/metrics-charts.md#aggregation) -- **Operation** - All operations or routes that can be called on the ACS Chat gateway.
+- **Operation** - All operations or routes that can be called on the Azure Communication Services Chat gateway.
- **Status Code** - The status code response sent after the request. - **StatusSubClass** - The status code series sent after the response.
communication-services Network Traversal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/network-traversal.md
Real-time Relays solve the problem of NAT (Network Address Translation) traversal for Peer-to-Peer (P2P) connections. Most devices on the internet today have an IP address used for internal LAN traffic (home or corporate network) or an externally visible address (router or NAT gateway). To connect two devices on the internet, the external address is required, but is typically not available to a device behind a NAT gateway. To address the connectivity issue, the following protocols are used:
- STUN (Session Traversal Utilities for NAT) offers a protocol to allow devices to exchange external IPs on the internet. If the clients can see each other, there is typically no need for a relay through a TURN service since the connection can be made peer-to-peer. A STUN server's job is to respond to request for a device's external IP.
-
- TURN (Traversal Using Relays around NAT) is an extension of the STUN protocol that also relays the data between two endpoints through a mutually visible server.
-
-## ACS Network Traversal Overview
+* STUN (Session Traversal Utilities for NAT) offers a protocol to allow devices to exchange external IPs on the internet. If the clients can see each other, there is typically no need for a relay through a TURN service since the connection can be made peer-to-peer. A STUN server's job is to respond to requests for a device's external IP.
+* TURN (Traversal Using Relays around NAT) is an extension of the STUN protocol that also relays the data between two endpoints through a mutually visible server.
-WebRTC(Web Real-Time Technologies) allow web browsers to stream audio, video, and data between devices without needing to have a gateway in the middle. Some of the common use cases here are voice, video, broadcasting, and screen sharing. To connect two endpoints on the internet, their external IP address is required. External IP is typically not available for devices sitting behind a corporate firewall. The protocols like STUN (Session Traversal Utilities for NAT) and TURN (Traversal Using Relays around NAT) are used to help the endpoints communicate.
+## Azure Communication Services Network Traversal Overview
-Azure Communication Service provides high bandwidth, low latency connections between peers for real-time communications scenarios. The ACS Network Traversal Service hosts TURN servers for use with the NAT scenarios. Azure Real-Time Relay Service exposes the existing STUN/TURN infrastructure as a Platform as a Service(PaaS) Azure offering. The service will provide low-level STUN and TURN services. Users are then billed proportional to the amount of data relayed.
+WebRTC (Web Real-Time Communication) allows web browsers to stream audio, video, and data between devices without needing to have a gateway in the middle. Some of the common use cases here are voice, video, broadcasting, and screen sharing. To connect two endpoints on the internet, their external IP address is required. An external IP is typically not available for devices sitting behind a corporate firewall. Protocols like STUN (Session Traversal Utilities for NAT) and TURN (Traversal Using Relays around NAT) are used to help the endpoints communicate.
+Azure Communication Services provides high bandwidth, low latency connections between peers for real-time communications scenarios. The Azure Communication Services Network Traversal Service hosts TURN servers for use with the NAT scenarios. Azure Real-Time Relay Service exposes the existing STUN/TURN infrastructure as a Platform as a Service (PaaS) Azure offering. The service will provide low-level STUN and TURN services. Users are then billed in proportion to the amount of data relayed.
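To make the STUN/TURN roles concrete, here is a minimal browser-side sketch of how relay servers are consumed by a standard `RTCPeerConnection`; the relay URLs and credentials are placeholders standing in for a relay configuration issued by the Network Traversal service:
```javascript
// Sketch: how STUN/TURN servers are consumed by a browser peer connection.
// The URLs, username, and credential are placeholders - in practice they would
// come from a relay configuration issued by the service.
const peerConnection = new RTCPeerConnection({
    iceServers: [
        { urls: 'stun:relay.example.com:3478' },     // STUN: discover the external IP
        {
            urls: 'turn:relay.example.com:3478',     // TURN: relay media when P2P fails
            username: '<RELAY_USERNAME>',
            credential: '<RELAY_CREDENTIAL>'
        }
    ]
});
// ICE candidate gathering then proceeds as usual; TURN is only used when a
// direct or STUN-assisted (reflexive) path cannot be established.
```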
## Next Steps:
communication-services Sub Eligibility Number Capability https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/numbers/sub-eligibility-number-capability.md
The tables below summarize current availability:
## Next steps
-For additional information about ACS' telephony options please see the following pages:
+For additional information about Azure Communication Services' telephony options, please see the following pages:
- [Learn more about Telephony](../telephony/telephony-concept.md) - Get a Telephony capable [phone number](../../quickstarts/telephony/get-phone-number.md)
communication-services Pricing https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/pricing.md
Please refer to the following links for details on SMS and Telephony pricing
## Next Steps
-Get started with ACS
+Get started with Azure Communication Services
- [Send an SMS](../quickstarts/sms/send.md) - [Add Voice calling to your app](../quickstarts/voice-video-calling/getting-started-with-calling.md)
communication-services Emergency Calling Concept https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/telephony/emergency-calling-concept.md
## Overview
-Azure Communication Calling SDK can be used to add Enhanced 911 dialing and Public Safety Answering Point (PSAP) call-back support to your applications in the United States (US) & Puerto Rico (PR). The capability to dial 911 and receive a call-back may be a requirement for your application. Verify the E911 requirements with your legal counsel.
+Azure Communication Calling SDK can be used to add Enhanced 911 dialing and Public Safety Answering Point (PSAP) call-back support to your applications in the United States (US) & Puerto Rico. The capability to dial 911 and receive a call-back may be a requirement for your application. Verify the E911 requirements with your legal counsel.
-Calls to 911 are routed over the Microsoft network. Microsoft assigns a temporary phone number as the Call Line Identity (CLI) when 911 calls from the US & PR are placed. Microsoft temporarily maintains a mapping of the phone number to the caller's identity. If there is a call-back from the PSAP, we route the call directly to the originating 911 caller. The caller can accept incoming PSAP call even if inbound calling is disabled.
+Calls to 911 are routed over the Microsoft network. Microsoft assigns a temporary phone number as the Call Line Identity (CLI) when 911 calls from the US & Puerto Rico are placed. Microsoft temporarily maintains a mapping of the phone number to the caller's identity. If there is a call-back from the PSAP, we route the call directly to the originating 911 caller. The caller can accept an incoming PSAP call even if inbound calling is disabled.
The service is available for Microsoft phone numbers. It requires that the Azure resource from where the 911 call originates has a Microsoft-issued phone number enabled with outbound dialing (also referred to as 'make calls').
-ACS direct routing is currently in public preview and not intended for production workloads. So E911 dialing is out of scope for ACS direct routing.
+Azure Communication Services direct routing is currently in public preview and not intended for production workloads. So E911 dialing is out of scope for Azure Communication Services direct routing.
## The call flow
-1. An ACS user identity dials 911 using the ACS Calling SDK from the US or PR
+1. An Azure Communication Services user identity dials 911 using the Calling SDK from the USA or Puerto Rico
1. Microsoft validates the Azure resource has a Microsoft phone number enabled for outbound dialing
-1. Microsoft ACS 911 service replaces the userΓÇÖs phone number `alternateCallerId` with a temporary unique phone number. This number allocation remains in place for at least 60 minutes from the time that 911 is first dialed
+1. Microsoft Azure Communication Services 911 service replaces the user's phone number `alternateCallerId` with a temporary unique phone number. This number allocation remains in place for at least 60 minutes from the time that 911 is first dialed
1. Microsoft maintains a temporary record (for approximately 60 minutes) of the user's identity to the unique phone number 1. The 911 call will be first routed to a call center where an agent will request the caller's address
-1. The call center will then route the call to the appropriate PSAP in the US or PR
+1. The call center will then route the call to the appropriate PSAP in the USA or Puerto Rico
1. If the 911 call is unexpectedly dropped, the PSAP then makes a call-back to the user 1. On receiving the call-back within 60 minutes, Microsoft will route the inbound call directly to the user identity, which initiated the 911 call
Emergency dialing is automatically enabled for all users of the Azure Communicat
1. Microsoft uses the ISO 3166-1 alpha-2 standard
- 1. Microsoft supports a country US and PR ISO codes for 911 dialing
+ 1. Microsoft supports a country US and Puerto Rico ISO codes for 911 dialing
1. If the country code is not provided to the SDK, the IP address is used to determine the country of the caller
- 1. If the IP address cannot provide reliable geolocation, for example the user is on a Virtual Private Network, it is required to set the ISO Code of the calling country using the API in the ACS Calling SDK. See example in the E911 quick start
+ 1. If the IP address cannot provide reliable geo-location, for example the user is on a Virtual Private Network, it is required to set the ISO Code of the calling country using the API in the Azure Communication Services Calling SDK. See example in the E911 quick start
1. If users are dialing from a US territory (for example Guam, US Virgin Islands, Northern Marianas, or American Samoa), it is required to set the ISO code to the US
- 1. If the caller is outside of the US and PR, the call to 911 will not be permitted
+ 1. If the caller is outside of the US and Puerto Rico, the call to 911 will not be permitted
1. When testing your application, dial 933 instead of 911. The 933 number is enabled for testing purposes; the recorded message will confirm the phone number the emergency call originates from. You should hear a temporary number assigned by Microsoft, not the `alternateCallerId` provided by the application 1. Ensure your application supports [receiving an incoming call](../../how-tos/calling-sdk/manage-calls.md#receive-an-incoming-call) so call-backs from the PSAP are appropriately routed to the originator of the 911 call. To test inbound calling is working correctly, place inbound VoIP calls to the user of the Calling SDK
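A hedged sketch of what a 933 test dial might look like with the JavaScript Calling SDK, assuming a `callAgent` from the calling quickstart; the phone numbers are placeholders and the `alternateCallerId` must be a number acquired on your resource with outbound dialing enabled:
```javascript
// Sketch: test-dialing 933 (the test line) with an alternate caller ID.
const testCall = callAgent.startCall(
    [{ phoneNumber: '933' }],                              // 933 instead of 911 while testing
    { alternateCallerId: { phoneNumber: '+14255550123' } } // placeholder outbound-enabled number
);
// The recorded message should read back a temporary number assigned by Microsoft,
// not the alternateCallerId supplied above.
```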
-The Emergency service is temporarily free to use for ACS customers within reasonable use, however, billing for the service will be enabled in 2022. Calls to 911 are capped at 10 concurrent calls per Azure resource.
+The Emergency service is temporarily free to use for Azure Communication Services customers within reasonable use; however, billing for the service will be enabled in 2022. Calls to 911 are capped at 10 concurrent calls per Azure resource.
## Next steps
communication-services Call Recording https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/voice-video-calling/call-recording.md
Many countries and states have laws and regulations that apply to the recording
Regulations around the maintenance of personal data require the ability to export user data. In order to support these requirements, recording metadata files include the participantId for each call participant in the `participants` array. You can cross-reference the MRIs in the `participants` array with your internal user identities to identify participants in a call. An example of a recording metadata file is provided below for reference. ## Availability
-Currently, ACS Call Recording APIs are available in C# and Java.
+Currently, Azure Communication Services Call Recording APIs are available in C# and Java.
## Next steps Check out the [Call Recording Quickstart](../../quickstarts/voice-video-calling/call-recording-sample.md) to learn more.
communication-services Media Quality Sdk https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/voice-video-calling/media-quality-sdk.md
Title: Azure Communication Services Media Quality metrics
-description: Provides an overview of the ACS media quality statics SDK.
+description: Provides an overview of the Azure Communication Services media quality statistics SDK.
# Media quality statistics
-When working with calls in Azure Communication Services, there will be times that you need to know the media quality statistics that are being generated within an ACS call. To help understand these details, we have a feature called "Media quality statistics" that you can use to examine the low-level audio, video, and screen-sharing quality metrics.
+When working with calls in Azure Communication Services, there will be times that you need to know the media quality statistics that are being generated within an Azure Communication Services call. To help understand these details, we have a feature called "Media quality statistics" that you can use to examine the low-level audio, video, and screen-sharing quality metrics.
### Media quality statistics for ongoing call > **NOTE**
To dispose all collectors, invoke `disposeAllCollectors` method of `mediaStatsAp
mediaStatsFeature.disposeAllCollectors(); ``` ## Best practices
-If you want to collect this data for off-line inspection (after a call ends) it is recommended to collect this data and send it to your pipeline ingest after your call has ended. If you transmit this data during an current all you could use needed internet bandwidth that could be needed to help continue an ACS call (in cases when available bandwidth is low).
+If you want to collect this data for offline inspection (after a call ends), it is recommended to collect this data and send it to your ingestion pipeline after your call has ended. If you transmit this data during a call, it could consume internet bandwidth that is needed to help continue an Azure Communication Services call (in cases when available bandwidth is low).
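A rough sketch of that best practice, buffering samples in memory and uploading them only once the call has disconnected; the collector and event names follow this article's snippets but may differ by SDK version, and `https://contoso.example/ingest` is a placeholder for your own pipeline endpoint:
```javascript
// Sketch: gather media quality samples during the call, upload after it ends.
const samples = [];
const mediaStatsFeature = call.api(SDK.Features.MediaStats); // assumed feature accessor
const collector = mediaStatsFeature.startCollector();

collector.on('mediaStatsEmitted', (mediaStats) => {
    samples.push(mediaStats); // keep locally; do not upload while the call is active
});

call.on('stateChanged', async () => {
    if (call.state === 'Disconnected' && samples.length > 0) {
        // Placeholder ingestion endpoint - replace with your own pipeline.
        await fetch('https://contoso.example/ingest', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ callId: call.id, samples })
        });
    }
});
```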
### Bandwidth metrics | Metric Name | Purpose | Detailed explanation | Comments |
If you want to collect this data for off-line inspection (after a call ends) it
| audioSendPacketsLost | Sent packet loss | The number of audio packets sent that were lost (not received) in the last second. Results are packets per second (over the last second). | Lower is better. | | audioRecvPackets | Received packets | The number of audio packets received in the last second. Results are packets per second (over the last second). | Information only. | | audioSendCodecName | Sent codec | Audio CODEC used. | Information only. |
-| audioSendRtt | Send Round-Trip Time | Round trip time between your system and ACS server. Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. |
+| audioSendRtt | Send Round-Trip Time | Round trip time between your system and Azure Communication Services server. Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. |
| audioSendPairRtt | Send Pair Round-Trip Time | Round trip time for entire transport. Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. | | audioRecvPairRtt | Receive Pair Round-Trip Time | Round trip time for entire transport Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. | | audioSendAudioInputLevel | Input level for microphone | Sent audio playout level. If source data is between 0-1, media stack multiplies it with 0xFFFF. Depends on microphone. Used to confirm if microphone is silent (no incoming energy). | Microphone input level. |
If you want to collect this data for off-line inspection (after a call ends) it
| videoSendPackets | Sent packets | The number of video packets sent. Results are packets per second (over the last second). | Information only | | VideoSendCodecName | Sent codec | Video CODEC used for encoding video | VP8 (1:1 calls) and H264 | | videoRecvJitterBufferMs | Received Jitter | Jitter is the amount of difference in packet delay (in milliseconds (ms)) | Lower is better. |
-| videoSendRtt | Send Round-Trip Time | Response time between your system and ACS server. Lower is better | A round trip time of 200 ms or less is recommended. |
-| videoSendPairRtt | Send Pair Round-Trip Time | Response time between your system and ACS server. Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. |
+| videoSendRtt | Send Round-Trip Time | Response time between your system and Azure Communication Services server. Lower is better | A round trip time of 200 ms or less is recommended. |
+| videoSendPairRtt | Send Pair Round-Trip Time | Response time between your system and Azure Communication Services server. Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. |
| videoRecvPairRtt | Receive Pair Round-Trip Time | Round trip time for entire transport. Results are in milliseconds (ms). | A round trip time of 200 ms or less is recommended. | | videoRecvFrameRateReceived | Received frame rate | Frame rate of video currently received | 25-30 fps (360p or better)<br>8-15 fps (270p or lower) | | videoRecvFrameWidthReceived | Received width | Width of video currently received | 1920, 1280, 960, 640, 480, 320 |
communication-services User Facing Diagnostics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/concepts/voice-video-calling/user-facing-diagnostics.md
The following user-facing diagnostics are available:
| Name | Description | Possible values | Use cases | Mitigation steps | | | -- | - | -- | | | noNetwork | There is no network available. | - Set to`True` when a call fails to start because there is no network available. <br/> - Set to `False` when there are ICE candidates present. | Device is not connected to a network. | Ensure that the call has a reliable internet connection that can sustain a voice call. See the [Network optimization](network-requirements.md#network-optimization) section for more details. |
-| networkRelaysNotReachable | Problems with a network. | - Set to`True` when the network has some constraint that is not allowing you to reach ACS relays. <br/> - Set to `False` upon making a new call. | During a call when the WiFi signal goes on and off. | Ensure that firewall rules and network routing allow client to reach Microsoft turn servers. See the [Firewall configuration](network-requirements.md#firewall-configuration) section for more details. |
+| networkRelaysNotReachable | Problems with a network. | - Set to`True` when the network has some constraint that is not allowing you to reach Azure Communication Services relays. <br/> - Set to `False` upon making a new call. | During a call when the WiFi signal goes on and off. | Ensure that firewall rules and network routing allow client to reach Microsoft turn servers. See the [Firewall configuration](network-requirements.md#firewall-configuration) section for more details. |
| networkReconnect | The connection was lost and we are reconnecting to the network. | - Set to`Bad` when the network is disconnected <br/> - Set to `Poor`when the media transport connectivity is lost <br/> - Set to `Good` when a new session is connected. | Low bandwidth, no internet | Ensure that the call has a reliable internet connection that can sustain a voice call. See the [Network bandwidth requirement](network-requirements.md#network-bandwidth) section for more details. | | networkReceiveQuality | An indicator regarding incoming stream quality. | - Set to`Bad` when there is a severe problem with receiving the stream. <br/> - Set to `Poor` when there is a mild problem with receiving the stream. <br/> - Set to `Good` when there is no problem with receiving the stream. | Low bandwidth | Ensure that the call has a reliable internet connection that can sustain a voice call. See the [Network bandwidth requirement](network-requirements.md#network-bandwidth) section for more details. Also consider suggesting to end user to turn off their camera to conserve available internet bandwidth. | | networkSendQuality | An indicator regarding outgoing stream quality. | - Set to`Bad` when there is a severe problem with sending the stream. <br/> - Set to `Poor` when there is a mild problem with sending the stream. <br/> - Set to `Good` when there is no problem with sending the stream. | Low bandwidth | Ensure that the call has a reliable internet connection that can sustain a voice call. See the [Network bandwidth requirement](network-requirements.md#network-bandwidth) section for more details. Also consider suggesting to end user to turn off their camera to conserve available internet bandwidth. |
The following user-facing diagnostics are available:
| -- | | | | | | noSpeakerDevicesEnumerated | There is no audio output device (speaker) on the user's system. | - Set to`True` when there are no speaker devices on the system, and speaker selection is supported. <br/> - Set to `False` when there is a least 1 speaker device on the system, and speaker selection is supported. | All speakers are unplugged | When value set to`True` consider giving visual notification to end user that their current call session does not have any speakers available. | | speakingWhileMicrophoneIsMuted | Speaking while being on mute. | - Set to`True` when local microphone is muted and the local user is speaking. <br/> - Set to `False` when local user either stops speaking, or unmutes the microphone. <br/> \* Note: Currently, this option isn't supported on Safari because the audio level samples are taken from WebRTC stats. | During a call, mute your microphone and speak into it. | When value set to`True` consider giving visual notification to end user that they might be talking and not realizing that their audio is muted. |
-| noMicrophoneDevicesEnumerated | No audio capture devices (microphone) on the user's system | - Set to`True` when there are no microphone devices on the system. <br/> - Set to `False` when there is at least 1 microphone device on the system. | All microphones are unplugged during the call. | When value set to`True` consider giving visual notification to end user that their current call session does not have a microphone. See how to [enable microphone from device manger](../best-practices.md#plug-in-microphone-or-enable-microphone-from-device-manager-when-acs-call-in-progress) for more details. |
+| noMicrophoneDevicesEnumerated | No audio capture devices (microphone) on the user's system | - Set to`True` when there are no microphone devices on the system. <br/> - Set to `False` when there is at least 1 microphone device on the system. | All microphones are unplugged during the call. | When value set to`True` consider giving visual notification to end user that their current call session does not have a microphone. See how to [enable microphone from device manager](../best-practices.md#plug-in-microphone-or-enable-microphone-from-device-manager-when-azure-communication-services-call-in-progress) for more details. |
| microphoneNotFunctioning | Microphone is not functioning. | - Set to`True` when we fail to start sending local audio stream because the microphone device may have been disabled in the system or it is being used by another process. This UFD takes about 10 seconds to get raised. <br/> - Set to `False` when microphone starts to successfully send audio stream again. | No microphones available, microphone access disabled in a system | When value set to`True` give visual notification to end user that there is a problem with their microphone. |
-| microphoneMuteUnexpectedly | Microphone is muted | - Set to`True` when microphone enters muted state unexpectedly. <br/> - Set to `False` when microphone starts to successfully send audio stream | Microphone is muted from the system. Most cases happen when user is on an ACS call on a mobile device and a phone call comes in. In most cases the operating system will mute the ACS call so a user can answer the phone call. | When value is set to`True` give visual notification to end user that their call was muted because a phone call came in. See how to best handle [OS muting an ACS call](../best-practices.md#handle-os-muting-call-when-phone-call-comes-in) section for more details. |
-| microphonePermissionDenied | There is low volume from device or itΓÇÖs almost silent on macOS. | - Set to`True` when audio permission is denied by system settings (audio). <br/> - Set to `False` on successful stream acquisition. <br/> Note: This diagnostic only works on macOS. | Microphone permissions are disabled in the Settings. | When value is set to`True` give visual notification to end user that they did not enable permission to use microphone for an ACS call. |
+| microphoneMuteUnexpectedly | Microphone is muted | - Set to`True` when microphone enters muted state unexpectedly. <br/> - Set to `False` when microphone starts to successfully send audio stream | Microphone is muted from the system. Most cases happen when user is on an Azure Communication Services call on a mobile device and a phone call comes in. In most cases the operating system will mute the Azure Communication Services call so a user can answer the phone call. | When value is set to`True` give visual notification to end user that their call was muted because a phone call came in. See how to best handle [OS muting an Azure Communication Services call](../best-practices.md#handle-os-muting-call-when-phone-call-comes-in) section for more details. |
+| microphonePermissionDenied | There is low volume from device or itΓÇÖs almost silent on macOS. | - Set to`True` when audio permission is denied by system settings (audio). <br/> - Set to `False` on successful stream acquisition. <br/> Note: This diagnostic only works on macOS. | Microphone permissions are disabled in the Settings. | When value is set to`True` give visual notification to end user that they did not enable permission to use microphone for an Azure Communication Services call. |
### Camera values | Name | Description | Possible values | Use cases | Mitigation steps | | | -- | - | | - |
-| cameraFreeze | Camera stops producing frames for more than 5 seconds. | - Set to`True` when the local video stream is frozen. This means the remote side is seeing your video frozen on their screen or it means that the remote participants are not rendering your video on their screen. <br/> - Set to `False` when the freeze ends and users can see your video as per normal. | The Camera was lost during the call or bad network caused the camera to freeze. | When value is set to`True` consider giving notification to end user that the remote participant network might be bad - possibly suggest that they turn off their camera to conserve bandwidth. See the [Network bandwidth requirement](network-requirements.md#network-bandwidth) section for more details on needed internet abilities for an ACS call. |
+| cameraFreeze | Camera stops producing frames for more than 5 seconds. | - Set to`True` when the local video stream is frozen. This means the remote side is seeing your video frozen on their screen or it means that the remote participants are not rendering your video on their screen. <br/> - Set to `False` when the freeze ends and users can see your video as per normal. | The Camera was lost during the call or bad network caused the camera to freeze. | When value is set to`True` consider giving notification to end user that the remote participant network might be bad - possibly suggest that they turn off their camera to conserve bandwidth. See the [Network bandwidth requirement](network-requirements.md#network-bandwidth) section for more details on needed internet abilities for an Azure Communication Services call. |
| cameraStartFailed | Generic camera failure. | - Set to`True` when we fail to start sending local video because the camera device may have been disabled in the system or it is being used by another process~. <br/> - Set to `False` when selected camera device successfully sends local video again. | Camera failures | When value is set to`True` give visual notification to end user that their camera failed to start. | | cameraStartTimedOut | Common scenario where camera is in bad state. | - Set to`True` when camera device times out to start sending video stream. <br/> - Set to `False` when selected camera device successfully sends local video again. | Camera failures | When value is set to`True` give visual notification to end user that their camera is possibly having problems. (When value is set back to `False` remove notification). |
-| cameraPermissionDenied | Camera permissions were denied in settings. | - Set to`True` when camera permission is denied by system settings (video). <br/> - Set to `False` on successful stream acquisition. <br> Note: This diagnostic only works on macOS Chrome. | Camera permissions are disabled in the settings. | When value is set to`True` give visual notification to end user that they did not enable permission to use camera for an ACS call. |
+| cameraPermissionDenied | Camera permissions were denied in settings. | - Set to`True` when camera permission is denied by system settings (video). <br/> - Set to `False` on successful stream acquisition. <br> Note: This diagnostic only works on macOS Chrome. | Camera permissions are disabled in the settings. | When value is set to`True` give visual notification to end user that they did not enable permission to use camera for an Azure Communication Services call. |
| cameraStoppedUnexpectedly | Camera malfunction | - Set to`True` when camera enters stopped state unexpectedly. <br/> - Set to `False` when camera starts to successfully send video stream again. | Check camera is functioning correctly. | When value is set to`True` give visual notification to end user that their camera is possibly having problems. (When value is set back to `False` remove notification). | ### Misc values
The following user-facing diagnostics are available:
| Name | Description | Possible values | Use cases | Mitigation Steps | | | -- | -- | :- | -- |
-| screenshareRecordingDisabled | System screen sharing was denied by preferences in Settings. | - Set to`True` when screen sharing permission is denied by system settings (sharing). <br/> - Set to `False` on successful stream acquisition. <br/> Note: This diagnostic only works on macOS.Chrome. | Screen recording is disabled in Settings. | When value is set to`True` give visual notification to end user that they did not enable permission to share their screen for an ACS call. |
+| screenshareRecordingDisabled | System screen sharing was denied by preferences in Settings. | - Set to`True` when screen sharing permission is denied by system settings (sharing). <br/> - Set to `False` on successful stream acquisition. <br/> Note: This diagnostic only works on macOS.Chrome. | Screen recording is disabled in Settings. | When value is set to`True` give visual notification to end user that they did not enable permission to share their screen for an Azure Communication Services call. |
| capturerStartFailed | System screen sharing failed. | - Set to`True` when we fail to start capturing the screen. <br/> - Set to `False` when capturing the screen can start successfully. | | When value is set to`True` give visual notification to end user that there was possibly a problem sharing their screen. (When value is set back to `False` remove notification). | | capturerStoppedUnexpectedly | System screen sharing malfunction | - Set to`True` when screen capturer enters stopped state unexpectedly. <br/> - Set to `False` when screen capturer starts to successfully capture again. | Check screen sharing is functioning correctly | When value is set to`True` give visual notification to end user that there possibly a problem that causes sharing their screen to stop. (When value is set back to `False` remove notification). |
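To act on the user-facing diagnostics listed above, an application typically subscribes to change events; a minimal sketch, using the same feature accessor shown earlier in these articles (names may vary by SDK version) and an app-defined `showBanner` helper that is purely illustrative:
```javascript
// Sketch: react in the UI when user-facing diagnostics change.
const diagnostics = call.api(SDK.Features.Diagnostics);

diagnostics.media.on('diagnosticChanged', (info) => {
    // info.diagnostic is the diagnostic name, info.value its new value.
    if (info.diagnostic === 'microphoneMuteUnexpectedly' && info.value === true) {
        showBanner('Your call was muted, possibly because a phone call came in.'); // app-defined UI helper
    }
});

diagnostics.network.on('diagnosticChanged', (info) => {
    if (info.diagnostic === 'networkRelaysNotReachable' && info.value === true) {
        showBanner('Unable to reach the media relays. Check your firewall configuration.');
    }
});
```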
communication-services Subscribe Events https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/how-tos/router-sdk/subscribe-events.md
If the storage account, queue or system topic doesn't exist, they'll be created
- **Azure Communication Services Resource Name**: The name of your Azure Communication Services resource. For example, if the endpoint to your resource is `https://contoso.communication.azure.net`, then set to `contoso`. - **Storage Name**: The name of your Azure Storage Account. If it doesn't exist, it will be created. - **Event Sub Name**: The name of the event subscription to create.-- **System Topic Name**: If you have existing event subscriptions on your ACS resource, find the `System Topic` name in the `Events` tab of your ACS resource. Otherwise, specify a unique name such as the ACS resource name itself.
+- **System Topic Name**: If you have existing event subscriptions on your Azure Communication Services resource, find the `System Topic` name in the `Events` tab of your Azure Communication Services resource. Otherwise, specify a unique name such as the Azure Communication Services resource name itself.
- **Queue Name**: The name of your Queue within your Storage Account. If it doesn't exist, it will be created. ### Deployed resources
communication-services Quickstart Botframework Integration https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/quickstarts/chat/quickstart-botframework-integration.md
You'll learn how to:
- [Create and deploy a bot](#step-1create-and-deploy-a-bot) - [Get an Azure Communication Services Resource](#step-2get-an-azure-communication-services-resource)-- [Enable Communication Services' Chat Channel for the bot](#step-3enable-acs-chat-channel)
+- [Enable Communication Services' Chat Channel for the bot](#step-3enable-azure-communication-services-chat-channel)
- [Create a chat app and add bot as a participant](#step-4create-a-chat-app-and-add-bot-as-a-participant) - [Explore additional features available for bot](#more-things-you-can-do-with-bot)
You'll learn how to:
## Step 1 - Create and deploy a bot
-In order to use ACS chat as a channel in Azure Bot Service, the first step would be to deploy a bot. Please follow these steps:
+In order to use Azure Communication Services chat as a channel in Azure Bot Service, the first step would be to deploy a bot. Please follow these steps:
### Provision a bot service resource in Azure
The final step would be to deploy the bot logic to the Web App we created. As we
:::image type="content" source="./media/smaller-deployment-config.png" alt-text="Deployment config" lightbox="./media/deployment-config.png"::: ## Step 2 - Get an Azure Communication Services Resource
-Now that you got the bot part sorted out, we'll need to get an ACS resource, which we would use for configuring the ACS channel.
+Now that you got the bot part sorted out, we'll need to get an Azure Communication Services resource, which we would use for configuring the Azure Communication Services channel.
1. Create an Azure Communication Services resource. For details, see [Create an Azure Communication Services resource](../../quickstarts/create-communication-resource.md). You'll need to **record your resource endpoint and key** for this quickstart.
-2. Create a ACS User and issue a user access token [User Access Token](../../quickstarts/access-tokens.md). Be sure to set the scope to **chat**, and **note the token string as well as the userId string**.
+2. Create an Azure Communication Services user and issue a [User Access Token](../../quickstarts/access-tokens.md). Be sure to set the scope to **chat**, and **note the token string as well as the userId string**.
-## Step 3 - Enable ACS Chat Channel
-With the ACS resource, we can configure the ACS channel in Azure Bot to bind an ACS User ID with a bot. Note that currently, only the allowlisted Azure account will be able to see Azure Communication Services - Chat channel.
+## Step 3 - Enable Azure Communication Services Chat Channel
+With the Azure Communication Services resource, we can configure the Azure Communication Services channel in Azure Bot to bind an Azure Communication Services User ID with a bot. Note that currently, only allowlisted Azure accounts are able to see the Azure Communication Services - Chat channel.
1. Go to your Bot Services resource on Azure portal. Navigate to `Channels` blade and click on `Azure Communications Services - Chat` channel from the list provided. :::image type="content" source="./media/smaller-demoapp-launch-acs-chat.png" alt-text="DemoApp Launch Acs Chat" lightbox="./media/demoapp-launch-acs-chat.png":::
-2. Provide the resource endpoint and the key belonging to the ACS resource that you want to connect with.
+2. Provide the resource endpoint and the key belonging to the Azure Communication Services resource that you want to connect with.
:::image type="content" source="./media/smaller-demoapp-connect-acsresource.png" alt-text="DemoApp Connect Acs Resource" lightbox="./media/demoapp-connect-acsresource.png":::
-3. Once the provided resource details are verified, you'll see the **bot's ACS ID** assigned. With this ID, you can add the bot to the conversation at whenever appropriate using Chat's AddParticipant API. Once the bot is added as participant to a chat, it will start receiving chat related activities and can respond back in the chat thread.
+3. Once the provided resource details are verified, you'll see the **bot's Azure Communication Services ID** assigned. With this ID, you can add the bot to the conversation whenever appropriate using Chat's AddParticipant API. Once the bot is added as a participant to a chat, it will start receiving chat-related activities and can respond in the chat thread.
:::image type="content" source="./media/smaller-demoapp-bot-detail.png" alt-text="DemoApp Bot Detail" lightbox="./media/demoapp-bot-detail.png"::: ## Step 4 - Create a chat app and add bot as a participant
-Now that you have the bot's ACS ID, you'll be able to create a chat thread with bot as a participant.
+Now that you have the bot's Azure Communication Services ID, you'll be able to create a chat thread with the bot as a participant.
### Create a new C# application ```console
namespace ChatQuickstart
### Start a chat thread with the bot
-Use the `createChatThread` method on the chatClient to create a chat thread, replace with the bot's ACS ID you obtained.
+Use the `createChatThread` method on the chatClient to create a chat thread, replacing `<BOT_ID>` with the bot's Azure Communication Services ID that you obtained.
```csharp var chatParticipant = new ChatParticipant(identifier: new CommunicationUserIdentifier(id: "<BOT_ID>")) {
Besides simple text message, bot is also able to receive and send many other act
- Event activity ### Send a welcome message when a new user is added to the thread
-With the current Echo Bot logic, it accepts input from the user and echoes it back. If you would like to add additional logic such as responding to a participant added ACS event, copy the following code snippets and paste into the source file: [EchoBot.cs](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/csharp_dotnetcore/02.echo-bot/Bots/EchoBot.cs)
+With the current Echo Bot logic, it accepts input from the user and echoes it back. If you would like to add additional logic such as responding to a participant added Azure Communication Services event, copy the following code snippets and paste into the source file: [EchoBot.cs](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/csharp_dotnetcore/02.echo-bot/Bots/EchoBot.cs)
```csharp using System.Threading;
await turnContext.SendActivityAsync(reply, cancellationToken);
``` You can find sample payloads for adaptive cards at [Samples and Templates](https://adaptivecards.io/samples)
-And on the ACS User side, the ACS message's metadata field will indicate this is a message with attachment.The key is microsoft.azure.communication.chat.bot.contenttype, which is set to the value azurebotservice.adaptivecard. This is an example of the chat message that will be received:
+And on the Azure Communication Services user side, the Azure Communication Services message's metadata field will indicate this is a message with an attachment. The key is microsoft.azure.communication.chat.bot.contenttype, which is set to the value azurebotservice.adaptivecard. This is an example of the chat message that will be received:
```json {
communication-services Get Started Raw Media Access https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/quickstarts/voice-video-calling/get-started-raw-media-access.md
In this quickstart, you'll learn how implement raw media access using the Azure
## Outbound virtual video device
-The ACS Calling SDK offers APIs allowing apps to generate their own video frames to send to remote participants.
+The Azure Communication Services Calling SDK offers APIs allowing apps to generate their own video frames to send to remote participants.
This quick start builds upon [QuickStart: Add 1:1 video calling to your app](./get-started-with-video-calling.md?pivots=platform-android) for Android.
This quick start builds upon [QuickStart: Add 1:1 video calling to your app](./g
Once an outbound virtual video device is created, use DeviceManager to make a new virtual video device that behaves just like any other webcam connected to your computer or mobile phone.
-Since the app will be generating the video frames, the app must inform the ACS Calling SDK about the video formats the app is capable of generating. This is required to allow the ACS Calling SDK to pick the best video format configuration given the network conditions at any giving time.
+Since the app will be generating the video frames, the app must inform the Azure Communication Services Calling SDK about the video formats the app is capable of generating. This is required to allow the Azure Communication Services Calling SDK to pick the best video format configuration given the network conditions at any given time.
The app must register a delegate to get notified about when it should start or stop producing video frames. The delegate event will inform the app which video format is more appropriate for the current network conditions.
communication-services Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/samples/overview.md
# Samples
-Azure Communication Services has many samples available, which you can use to test out ACS services and features before creating your own application or use case.
+Azure Communication Services has many samples available, which you can use to test out Azure Communication Services features before creating your own application or use case.
## Application samples
Azure Communication Services has many samples available, which you can use to te
| [Calling Hero Sample](./calling-hero-sample.md) | Provides a sample of creating a calling application. | [Web](https://github.com/Azure-Samples/communication-services-web-calling-hero), [iOS](https://github.com/Azure-Samples/communication-services-ios-calling-hero), [Android](https://github.com/Azure-Samples/communication-services-android-calling-hero) | | [Chat Hero Sample](./chat-hero-sample.md) | Provides a sample of creating a chat application. | [Web](https://github.com/Azure-Samples/communication-services-web-chat-hero) | | [Trusted Authentication Server Sample](./trusted-auth-sample.md) | Provides a sample implementation of a trusted authentication service used to generate user and access tokens for Azure Communication Services. The service by default maps generated identities to Azure Active Directory | [node.JS](https://github.com/Azure-Samples/communication-services-authentication-hero-nodejs), [C#](https://github.com/Azure-Samples/communication-services-authentication-hero-csharp)
-| [Web Calling Sample](./web-calling-sample.md) | A step by step walk-through of ACS Calling features, including PSTN, within the Web. | [Web](https://github.com/Azure-Samples/communication-services-web-calling-tutorial/) |
+| [Web Calling Sample](./web-calling-sample.md) | A step by step walk-through of Azure Communication Services Calling features, including PSTN, within the Web. | [Web](https://github.com/Azure-Samples/communication-services-web-calling-tutorial/) |
| [Network Traversal Sample](https://github.com/Azure-Samples/communication-services-network-traversal-hero) | Sample app demonstrating network traversal functionality | Node.js |

## Quickstart samples
communication-services Building App Start https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/tutorials/building-app-start.md
Paste this code into `index.html`:
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
-  <title>My first ACS application</title>
+  <title>My first Azure Communication Services application</title>
  <link rel="stylesheet" href="./app.css"/>
  <script src="./app.js" defer></script>
</head>
<body>
-  <h1>Hello from ACS!</h1>
+  <h1>Hello from Azure Communication Services!</h1>
</body>
</html>
```
communication-services Postman Tutorial https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/communication-services/tutorials/postman-tutorial.md
- Title: Tutorial - Sign and make requests to ACS' SMS API with Postman
+ Title: Tutorial - Sign and make requests to Azure Communication Services' SMS API with Postman
-description: Learn how to sign and makes requests for ACS with Postman to send an SMS Message.
+description: Learn how to sign and make requests for Azure Communication Services with Postman to send an SMS message.
In this tutorial we'll be:
- An Azure account with an active subscription. For details, see [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F). The free account gives you $200 in Azure credits to try out any combination of services.
- An active Communication Services resource and connection string. [Learn how to create a Communication Services resource](../quickstarts/create-communication-resource.md).
-- An ACS Telephone number that can send SMS messages, see our [Get a phone number](../quickstarts/telephony/get-phone-number.md) to get one.
+- An Azure Communication Services telephone number that can send SMS messages. See [Get a phone number](../quickstarts/telephony/get-phone-number.md) to get one.
## Downloading and installing Postman
Postman can organize requests in many ways. For the purposes of this tutorial.
:::image type="content" source="media/postman/collections-tab.png" alt-text="Postman's main screen with the Collections tab highlighted.":::
-Once selected, click "Create new Collection", to start the collection creation process. A new tab will open in the center area of Postman. Name the collection whatever you'd like. Here the collection is named "ACS":
+Once selected, click "Create new Collection" to start the collection creation process. A new tab will open in the center area of Postman. Name the collection whatever you'd like. Here the collection is named "Azure Communication Services":
:::image type="content" source="media/postman/acs-collection.png" alt-text="Postman with a Communication Services Collection opened and the name of the collection highlighted.":::
pm.request.headers.upsert({
Enter or paste this final script into the text area within the Pre-request Script tab:
Once entered, press CTRL + S or press the save button; this will save the script to the collection.
In the text area below, you'll need to enter a request body. It should be in the
```JSON {
- "from":"<Your ACS Telephone Number>",
+ "from":"<Your Azure Communication Services Telephone Number>",
"message":"<The message you'd like to send>", "smsRecipients": [ {
} ```
-For the "from" value, you'll need to [get a telephone number](../quickstarts/telephony/get-phone-number.md) in the Azure Communication Services Portal as previously mentioned. Enter it without any spaces and prefixed by your country code. For example: `+15555551234`. Your "message" can be whatever you'd like to send but `Hello from ACS` is a good example. The "to" value should be a phone you have access to that can receive SMS messages. Using your own mobile is a good idea.
+For the "from" value, you'll need to [get a telephone number](../quickstarts/telephony/get-phone-number.md) in the Azure Communication Services Portal as previously mentioned. Enter it without any spaces and prefixed by your country code. For example: `+15555551234`. Your "message" can be whatever you'd like to send but `Hello from Azure Communication Services` is a good example. The "to" value should be a phone you have access to that can receive SMS messages. Using your own mobile is a good idea.
Once entered, we need to save this request into the Communication Services Collection that we created earlier. This ensures that it picks up the variables and pre-request script we set up previously. To do this, click the "save" button in the top right of the request area.
container-apps Microservices Dapr Azure Resource Manager https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/container-apps/microservices-dapr-azure-resource-manager.md
You learn how to:
With Azure Container Apps, you get a fully managed version of the Dapr APIs when building microservices. When you use Dapr in Azure Container Apps, you can enable sidecars to run next to your microservices that provide a rich set of capabilities. Available Dapr APIs include [Service to Service calls](https://docs.dapr.io/developing-applications/building-blocks/service-invocation/), [Pub/Sub](https://docs.dapr.io/developing-applications/building-blocks/pubsub/), [Event Bindings](https://docs.dapr.io/developing-applications/building-blocks/bindings/), [State Stores](https://docs.dapr.io/developing-applications/building-blocks/state-management/), and [Actors](https://docs.dapr.io/developing-applications/building-blocks/actors/).
-In this tutorial, you deploy the same applications from the Dapr [Hello World](https://github.com/dapr/quickstarts/tree/master/hello-kubernetes) quickstart.
+In this tutorial, you deploy the same applications from the Dapr [Hello World](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes) quickstart.
The application consists of:
Remove-AzResourceGroup -Name $RESOURCE_GROUP -Force
## Next steps

> [!div class="nextstepaction"]
-> [Application lifecycle management](application-lifecycle-management.md)
+> [Application lifecycle management](application-lifecycle-management.md)
container-apps Microservices Dapr https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/container-apps/microservices-dapr.md
You learn how to:
With Azure Container Apps, you get a fully managed version of the Dapr APIs when building microservices. When you use Dapr in Azure Container Apps, you can enable sidecars to run next to your microservices that provide a rich set of capabilities. Available Dapr APIs include [Service to Service calls](https://docs.dapr.io/developing-applications/building-blocks/service-invocation/), [Pub/Sub](https://docs.dapr.io/developing-applications/building-blocks/pubsub/), [Event Bindings](https://docs.dapr.io/developing-applications/building-blocks/bindings/), [State Stores](https://docs.dapr.io/developing-applications/building-blocks/state-management/), and [Actors](https://docs.dapr.io/developing-applications/building-blocks/actors/).
-In this tutorial, you deploy the same applications from the Dapr [Hello World](https://github.com/dapr/quickstarts/tree/master/hello-kubernetes) quickstart.
+In this tutorial, you deploy the same applications from the Dapr [Hello World](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes) quickstart.
The application consists of:
container-registry Container Registry Auto Purge https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/container-registry/container-registry-auto-purge.md
The `acr purge` container command deletes images by tag in a repository that mat
At a minimum, specify the following when you run `acr purge`:
* `--filter` - A repository and a *regular expression* to filter tags in the repository. Examples: `--filter "hello-world:.*"` matches all tags in the `hello-world` repository, and `--filter "hello-world:^1.*"` matches tags beginning with `1`. Pass multiple `--filter` parameters to purge multiple repositories.
-* `--ago` - A Go-style [duration string](https://golang.org/pkg/time/) to indicate a duration beyond which images are deleted. The duration consists of a sequence of one or more decimal numbers, each with a unit suffix. Valid time units include "d" for days, "h" for hours, and "m" for minutes. For example, `--ago 2d3h6m` selects all filtered images last modified more than 2 days, 3 hours, and 6 minutes ago, and `--ago 1.5h` selects images last modified more than 1.5 hours ago.
+* `--ago` - A Go-style [duration string](https://go.dev/pkg/time/) to indicate a duration beyond which images are deleted. The duration consists of a sequence of one or more decimal numbers, each with a unit suffix. Valid time units include "d" for days, "h" for hours, and "m" for minutes. For example, `--ago 2d3h6m` selects all filtered images last modified more than 2 days, 3 hours, and 6 minutes ago, and `--ago 1.5h` selects images last modified more than 1.5 hours ago.
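As a hedged illustration of how `--filter` and `--ago` combine, the following sketch runs the purge as an on-demand ACR task; the registry name and filter are placeholders, and `--dry-run` is included so nothing is deleted while you test the expression.

```azurecli
# List (but don't delete) images in 'hello-world' last modified more than one day ago.
PURGE_CMD="acr purge --filter 'hello-world:.*' --ago 1d --dry-run"

az acr run \
  --registry myregistry \
  --cmd "$PURGE_CMD" \
  /dev/null
```

Remove `--dry-run` once the filter selects only the tags you intend to purge.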
`acr purge` supports several optional parameters. The following two are used in examples in this article:
cosmos-db Manage Data Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cosmos-db/cassandra/manage-data-go.md
Azure Cosmos DB is a multi-model database service that lets you quickly create a
## Prerequisites

- An Azure account with an active subscription. [Create one for free](https://azure.microsoft.com/free/?WT.mc_id=cassandrago-docs-abhishgu). Or [try Azure Cosmos DB for free](https://azure.microsoft.com/try/cosmosdb/?WT.mc_id=cassandrago-docs-abhishgu) without an Azure subscription.
-- [Go](https://golang.org/) installed on your computer, and a working knowledge of Go.
+- [Go](https://go.dev/) installed on your computer, and a working knowledge of Go.
- [Git](https://git-scm.com/downloads).

## Create a database account
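The quickstart creates the account in the Azure portal; as a rough CLI alternative (the resource group, account name, and region below are placeholders, not values from the article), a Cassandra API account can be created like this:

```azurecli
# Create a resource group and a Cosmos DB account with the Cassandra API enabled.
az group create --name my-resource-group --location eastus

az cosmosdb create \
  --name my-cassandra-account \
  --resource-group my-resource-group \
  --capabilities EnableCassandra
```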
cosmos-db Create Mongodb Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cosmos-db/mongodb/create-mongodb-go.md
The sample application is a command-line based `todo` management tool written in
## Prerequisites

- An Azure account with an active subscription. [Create one for free](https://azure.microsoft.com/free). Or [try Azure Cosmos DB for free](https://azure.microsoft.com/try/cosmosdb/) without an Azure subscription. You can also use the [Azure Cosmos DB Emulator](https://aka.ms/cosmosdb-emulator) with the connection string `.mongodb://localhost:C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==@localhost:10255/admin?ssl=true`.
-- [Go](https://golang.org/) installed on your computer, and a working knowledge of Go.
+- [Go](https://go.dev/) installed on your computer, and a working knowledge of Go.
- [Git](https://git-scm.com/downloads).

[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](../../../includes/azure-cli-prepare-your-environment-no-header.md)]
cosmos-db Create Cosmosdb Resources Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/cosmos-db/sql/create-cosmosdb-resources-portal.md
Add data to your new database using Data Explorer.
If you wish to delete just the database and use the Azure Cosmos account in the future, you can delete the database with the following steps:
-* Got to your Azure Cosmos account.
+* Go to your Azure Cosmos account.
* Open **Data Explorer**, right-click the database that you want to delete, and select **Delete Database**.
* Enter the Database ID/database name to confirm the delete operation.
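If you'd rather script this than use Data Explorer, a roughly equivalent Azure CLI sketch (account, resource group, and database names are placeholders) is:

```azurecli
# Delete only the database; the Azure Cosmos account itself is left in place.
az cosmosdb sql database delete \
  --account-name my-cosmos-account \
  --resource-group my-resource-group \
  --name my-database
```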
devtest-labs Devtest Lab Add Vm https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/devtest-labs/devtest-lab-add-vm.md
You need at least [user](devtest-lab-add-devtest-user.md#devtest-labs-user) acce
<a name="add-artifacts-during-installation"></a> ## Add optional artifacts
-Artifacts are tools, actions, or software you can add to lab VMs. You can add artifacts to VMs from the [DevTest Labs public artifact repository](https://github.com/Azure/azure-devtestlab/Artifacts), or from private artifact repositories connected to the lab. For more information, see [Add artifacts to DevTest Labs VMs](add-artifact-vm.md).
+Artifacts are tools, actions, or software you can add to lab VMs. You can add artifacts to VMs from the [DevTest Labs public artifact repository](https://github.com/Azure/azure-devtestlab/tree/master/Artifacts), or from private artifact repositories connected to the lab. For more information, see [Add artifacts to DevTest Labs VMs](add-artifact-vm.md).
To add or modify artifacts during VM creation:
Or, if you chose **Make this machine claimable** during VM creation, select **Cl
- [Add artifacts to VMs after creation](add-artifact-vm.md#add-artifacts-to-vms-from-the-azure-portal).
- Create DevTest Labs VMs by using [PowerShell](devtest-lab-vm-powershell.md), [Azure CLI](devtest-lab-vmcli.md), an [ARM template](devtest-lab-use-resource-manager-template.md), or from a [shared image gallery](add-vm-use-shared-image.md).
-- Explore the DevTest Labs public repositories of [artifacts](https://github.com/Azure/azure-devtestlab/Artifacts), [environments](https://github.com/Azure/azure-devtestlab/Environments), and [QuickStart ARM templates](https://github.com/Azure/azure-devtestlab/samples/DevTestLabs/QuickStartTemplates).
+- Explore the DevTest Labs public repositories of [artifacts](https://github.com/Azure/azure-devtestlab/tree/master/Artifacts), [environments](https://github.com/Azure/azure-devtestlab/tree/master/Environments), and [QuickStart ARM templates](https://github.com/Azure/azure-devtestlab/tree/master/samples/DevTestLabs/QuickStartTemplates).
devtest-labs Devtest Lab Create Lab https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/devtest-labs/devtest-lab-create-lab.md
This quickstart walks you through creating a lab in Azure DevTest Labs by using
- **Resource group**: Select an existing resource group from the dropdown list, or select **Create new** to create a new resource group so it's easy to delete later.
- **Lab Name**: Enter a name for the lab.
- **Location**: If you're creating a new resource group, select an Azure region for the resource group and lab.
- - **Public environments**: Leave **On** for access to the [DevTest Labs public environment repository](https://github.com/Azure/azure-devtestlab/Environments). Set to **Off** to disable access. For more information, see [Enable public environments when you create a lab](devtest-lab-create-environment-from-arm.md#enable-public-environments-when-you-create-a-lab).
+ - **Public environments**: Leave **On** for access to the [DevTest Labs public environment repository](https://github.com/Azure/azure-devtestlab/tree/master/Environments). Set to **Off** to disable access. For more information, see [Enable public environments when you create a lab](devtest-lab-create-environment-from-arm.md#enable-public-environments-when-you-create-a-lab).
:::image type="content" source="./media/devtest-lab-create-lab/portal-create-basic-settings.png" alt-text="Screenshot of the Basic Settings tab in the Create DevTest Labs form.":::
devtest-labs Encrypt Storage https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/devtest-labs/encrypt-storage.md
Azure Storage encrypts lab data with a Microsoft-managed key. Optionally, you ca
For more information and instructions on configuring customer-managed keys for Azure Storage encryption, see:

-- [Use customer-managed keys with Azure Key Vault to manage Azure Storage encryption](/azure/storage/common/customer-managed-keys-overview.md)
+- [Use customer-managed keys with Azure Key Vault to manage Azure Storage encryption](/azure/storage/common/customer-managed-keys-overview)
- [Configure encryption with customer-managed keys stored in Azure Key Vault](/azure/storage/common/customer-managed-keys-configure-key-vault)

## Next steps
event-hubs Event Hubs Go Get Started Send https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/event-hubs/event-hubs-go-get-started-send.md
This tutorial describes how to write Go applications to send events to or receiv
To complete this tutorial, you need the following prerequisites:

-- Go installed locally. Follow [these instructions](https://golang.org/doc/install) if necessary.
+- Go installed locally. Follow [these instructions](https://go.dev/doc/install) if necessary.
- An active Azure account. If you don't have an Azure subscription, create a [free account][] before you begin.
- **Create an Event Hubs namespace and an event hub**. Use the [Azure portal](https://portal.azure.com) to create a namespace of type Event Hubs, and obtain the management credentials your application needs to communicate with the event hub. To create a namespace and an event hub, follow the procedure in [this article](event-hubs-create.md).
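The linked article walks through the portal steps; as a minimal CLI sketch (resource group, namespace, and event hub names are placeholders), the same resources can be created with:

```azurecli
# Create an Event Hubs namespace, then an event hub inside it.
az eventhubs namespace create \
  --resource-group my-resource-group \
  --name my-eventhubs-namespace \
  --location eastus

az eventhubs eventhub create \
  --resource-group my-resource-group \
  --namespace-name my-eventhubs-namespace \
  --name my-event-hub
```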
event-hubs Monitor Event Hubs Reference https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/event-hubs/monitor-event-hubs-reference.md
Azure Event Hubs supports the following dimensions for metrics in Azure Monitor.
Runtime audit logs capture aggregated diagnostic information for all data plane access operations (such as send or receive events) in the Event Hubs dedicated cluster.

> [!NOTE]
-> Runtime audit logs are currently available only in the **dedicated** tier.
+> Runtime audit logs are currently available only in **premium** and **dedicated** tiers.
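Runtime audit logs are collected through a diagnostic setting on the namespace. The following is a minimal sketch only; the `RuntimeAuditLogs` category name, resource ID, and workspace ID are assumptions or placeholders, so confirm them against this article's reference tables.

```azurecli
# Route the (assumed) RuntimeAuditLogs category to a Log Analytics workspace.
az monitor diagnostic-settings create \
  --name runtime-audit \
  --resource "/subscriptions/<subscription-id>/resourceGroups/my-resource-group/providers/Microsoft.EventHub/namespaces/my-namespace" \
  --workspace "/subscriptions/<subscription-id>/resourceGroups/my-resource-group/providers/Microsoft.OperationalInsights/workspaces/my-workspace" \
  --logs '[{"category": "RuntimeAuditLogs", "enabled": true}]'
```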
Runtime audit logs include the elements listed in the following table:
Here's an example of a runtime audit log entry:
Application metrics logs capture the aggregated information on certain metrics related to data plane operations. The captured information includes the following runtime metrics.

> [!NOTE]
-> Application metrics logs are currently available only in the **dedicated** tier.
+> Application metrics logs are currently available only in **premium** and **dedicated** tiers.
Name | Description
- | -
expressroute Expressroute Faqs https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/expressroute/expressroute-faqs.md
Title: FAQ - Azure ExpressRoute | Microsoft Docs
description: The ExpressRoute FAQ contains information about Supported Azure Services, Cost, Data and Connections, SLA, Providers and Locations, Bandwidth, and other Technical Details.
- Previously updated : 03/29/2021
Last updated : 03/24/2022
You will also have to follow up with your connectivity provider to ensure that t
### How do I change the bandwidth of an ExpressRoute circuit?
-You can update the bandwidth of the ExpressRoute circuit using the Azure Portal, REST API, PowerShell, or Azure CLI.
+You can update the bandwidth of the ExpressRoute circuit using the Azure portal, REST API, PowerShell, or Azure CLI.
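For example, with the Azure CLI this is a single update call; the circuit name, resource group, and bandwidth value below are placeholders, and this assumes the `--bandwidth` parameter of `az network express-route update` (specified in Mbps).

```azurecli
# Update the circuit to 1000 Mbps (placeholder values).
az network express-route update \
  --name MyCircuit \
  --resource-group MyResourceGroup \
  --bandwidth 1000
```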
### I received a notification about maintenance on my ExpressRoute circuit. What is the technical impact of this maintenance?
You should experience minimal to no impact during maintenance if you operate you
### I received a notification about a software upgrade or maintenance on my ExpressRoute gateway. What is the technical impact of this maintenance?
-You should experience minimal to no impact during a software upgrade or maintenance on your gateway. The ExpressRoute gateway is comprised of multiple instance and during upgrades, instances are taken offline one at a time. While this may cause your gateway to temporarily support lower network throughput to the virtual network, the gateway itself will not experience any downtime.
+You should experience minimal to no impact during a software upgrade or maintenance on your gateway. The ExpressRoute gateway is composed of multiple instances, and during upgrades, instances are taken offline one at a time. While this may cause your gateway to temporarily support lower network throughput to the virtual network, the gateway itself will not experience any downtime.
+
+## ExpressRoute SKU scope access
+
+### What is the connectivity scope for different ExpressRoute circuit SKUs?
+
+The following diagram shows the connectivity scope of different ExpressRoute circuit SKUs. In this example, your on-premises network is connected to an ExpressRoute peering site in London. With a Local SKU ExpressRoute circuit, you can connect to resources in Azure regions in the same metro as the peering site. In this case, your on-premises network can access UK South Azure resources over ExpressRoute. For more information, see [What is ExpressRoute Local?](#what-is-expressroute-local). When you configure a Standard SKU ExpressRoute circuit, connectivity to Azure resources will expand to all Azure regions in a geopolitical area. As shown in the diagram, your on-premises network can connect to resources in West Europe and France Central. To allow your on-premises network to access resources globally across all Azure regions, you'll need to configure an ExpressRoute premium SKU circuit. For more information, see [What is ExpressRoute premium?](#what-is-expressroute-premium).
## ExpressRoute premium
expressroute How To Configure Custom Bgp Communities https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/expressroute/how-to-configure-custom-bgp-communities.md
BGP communities are groupings of IP prefixes tagged with a community value. This
```
> [!IMPORTANT]
-> If your existing virtual network is already connected to an ExpressRoute circuit, you'll need to delete and recreate the ExpressRoute connection after applying the custom BGP community value. See [link a virtual network to an ExpressRoute circuit](expressroute-howto-linkvnet-arm.md), to learn how.
+> If your virtual network is peered with other virtual networks, you'll need to delete and recreate each virtual network peering after applying the custom BGP community value. See [Connect virtual networks with virtual network peering](../virtual-network/tutorial-connect-virtual-networks-portal.md) to learn how.
>

## Next steps
frontdoor Front Door Quickstart Template Samples https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/frontdoor/front-door-quickstart-template-samples.md
The following table includes links to Azure Resource Manager deployment model te
| [Azure Functions with Private Link](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.cdn/front-door-premium-function-private-link) | Creates an Azure Functions app with a private endpoint, and a Front Door profile. |
|**API Management origins**| **Description** |
| [API Management (external)](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.cdn/front-door-standard-premium-api-management-external) | Creates an API Management instance with external VNet integration, and a Front Door profile. |
+| [API Management with Private Link](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.cdn/front-door-premium-api-management-private-link) | Creates an API Management instance with a private endpoint, and a Front Door profile. |
|**Storage origins**| **Description** |
| [Storage static website](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.cdn/front-door-standard-premium-storage-static-website) | Creates an Azure Storage account and static website with a public endpoint, and a Front Door profile. |
| [Storage blobs with Private Link](https://github.com/Azure/azure-quickstart-templates/tree/master/quickstarts/microsoft.cdn/front-door-premium-storage-blobs-private-link) | Creates an Azure Storage account and blob container with a private endpoint, and a Front Door profile. |
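Any of these quickstart templates can be deployed into an existing resource group from the CLI; the sketch below assumes the repository's usual `azuredeploy.json` layout, and the resource group and chosen template are placeholders (add `--parameters` as the template requires).

```azurecli
# Deploy one of the Front Door quickstart templates directly from GitHub.
az deployment group create \
  --resource-group my-resource-group \
  --template-uri "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/quickstarts/microsoft.cdn/front-door-standard-premium-storage-static-website/azuredeploy.json"
```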
germany Germany Developer Guide https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-developer-guide.md
- Title: Azure Germany developer guide | Microsoft Docs
-description: This article compares features and provides guidance on developing applications for Azure Germany.
- Previously updated : 10/16/2020------
-# Azure Germany developer guide
--
-The Azure Germany environment is an instance of Microsoft Azure that is separate from the rest of the Microsoft network. This guide discusses the differences that application developers and administrators must understand to interact and work with separate regions of Azure.
-
-## Overview
-Microsoft provides various tools to help developers create and deploy cloud applications to the global Microsoft Azure services ("global Azure") and Microsoft Azure Germany services. Azure Germany addresses the security and compliance needs of customers to follow German data privacy regulations. Azure Germany offers physical and network isolation from global Azure deployments and provides a data trustee acting under German law.
-
-When developers create and deploy applications to Azure Germany, as opposed to global Azure, they need to know the differences between the two sets of services. The specific areas to understand are: setting up and configuring their programming environment, configuring endpoints, writing applications, and deploying the applications as services to Azure Germany.
-
-The information in this guide summarizes these differences. It supplements the information that's available on the [Azure Germany](https://azure.microsoft.com/overview/clouds/germany/ "Azure Germany") site and the [Azure Documentation Center](https://azure.microsoft.com/documentation/).
-
-Official information might also be available in other locations, such as:
-* [Microsoft Azure Trust Center](https://azure.microsoft.com/support/trust-center/ "Microsoft Azure Trust Center")
-* [Azure blog](https://azure.microsoft.com/blog/ "Azure blog")
-* [Azure Germany blog](/archive/blogs/azuregermany/ "Azure Germany blog")
-
-## Guidance for developers
-Most of the currently available technical content assumes that applications are being developed for global Azure rather than for Azure Germany. For this reason, it's important to be aware of two key differences in applications that you develop for hosting in Azure Germany:
-
-* Certain services and features that are in specific regions of global Azure might not be available in Azure Germany.
-* Feature configurations in Azure Germany might differ from those in global Azure. It's important to review your sample code, configurations, and steps to ensure that you are building and executing within the Azure Germany Cloud Services environment.
-
-Currently, Germany Central and Germany Northeast are the regions that are available in Azure Germany. For regions and available services, see [Products available by region](https://azure.microsoft.com/regions/services).
--
-## Endpoint mapping
-To learn about mapping global Azure and Azure SQL Database endpoints to Azure Germany-specific endpoints, see the following table:
-
-| Name | Azure Germany endpoint |
-| | |
-| ActiveDirectoryServiceEndpointResourceId | `https://management.core.cloudapi.de/` |
-| GalleryUrl | `https://gallery.cloudapi.de/` |
-| ManagementPortalUrl | `https://portal.microsoftazure.de/` |
-| ServiceManagementUrl | `https://management.core.cloudapi.de/` |
-| PublishSettingsFileUrl | `https://manage.microsoftazure.de/publishsettings/index` |
-| ResourceManagerUrl | `https://management.microsoftazure.de/` |
-| SqlDatabaseDnsSuffix | `.database.cloudapi.de` |
-| StorageEndpointSuffix | `core.cloudapi.de` |
-| ActiveDirectoryAuthority | `https://login.microsoftonline.de/` |
-| GraphUrl | `https://graph.cloudapi.de/` |
-| TrafficManagerDnsSuffix | `azuretrafficmanager.de` |
-| AzureKeyVaultDnsSuffix | `vault.microsoftazure.de` |
-| AzureKeyVaultServiceEndpointResourceId | `https://vault.microsoftazure.de` |
-| Service Bus Suffix | `servicebus.cloudapi.de` |
--
-## Next steps
-For more information about Azure Germany, see the following resources:
-
-* [Sign up for a trial](https://azure.microsoft.com/free/germany/)
-* [Acquiring Azure Germany](https://azure.microsoft.com/overview/clouds/germany/)
-* [Sign-in page](https://portal.microsoftazure.de/) if you already have an Azure Germany account
-* [Azure Germany overview](./germany-welcome.md)
-* [Azure Germany blog](/archive/blogs/azuregermany/)
-* [Azure compliance](https://www.microsoft.com/en-us/trustcenter/compliance/complianceofferings)
germany Germany Get Started Connect With Cli https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-get-started-connect-with-cli.md
- Title: Connect to Azure Germany by using Azure CLI | Microsoft Docs
-description: Information on managing your subscription in Azure Germany by using Azure CLI
- Previously updated : 10/16/2020------
-# Connect to Azure Germany by using Azure CLI
--
-To use the Azure CLI, you need to connect to Azure Germany instead of global Azure. You can use the Azure CLI to manage a large subscription through scripts or to access features that are not currently available in the Azure portal. If you have used Azure CLI in global Azure, it's mostly the same.
-
-## Azure CLI
-There are multiple ways to [install the Azure CLI](/cli/azure/install-az-cli2).
-
-To connect to Azure Germany, set the cloud:
-
-```azurecli
-az cloud set --name AzureGermanCloud
-```
-
-After the cloud is set, you can log in:
-
-```azurecli
-az login --username your-user-name@your-tenant.onmicrosoft.de
-```
-
-To confirm that the cloud is correctly set to AzureGermanCloud, run either of the following commands and then verify that the `isActive` flag is set to `true` for the AzureGermanCloud item:
-
-```azurecli
-az cloud list
-```
-
-```azurecli
-az cloud list --output table
-```
-
-## Azure classic CLI
-There are multiple ways to [install Azure classic CLI](/cli/azure/install-azure-cli). If you already have Node installed, the easiest way is to install the npm package.
-
-To install CLI from an npm package, make sure you have downloaded and installed the [latest Node.js and npm](https://nodejs.org/en/download/package-manager/). Then, run **npm install** to install the **azure-cli** package:
-
-```bash
-npm install -g azure-cli
-```
-
-On Linux distributions, you might need to use **sudo** to successfully run the **npm** command, as follows:
-
-```bash
-sudo npm install -g azure-cli
-```
-
-> [!NOTE]
-> If you need to install or update Node.js and npm on your Linux distribution or OS, we recommend that you install the most recent Node.js LTS version (4.x). If you use an older version, you might get installation errors.
--
-After Azure CLI is installed, log in to Azure Germany:
-
-```console
-azure login --username your-user-name@your-tenant.onmicrosoft.de --environment AzureGermanCloud
-```
-
-After you're logged in, you can run Azure CLI commands as you normally would:
-
-```console
-azure webapp list my-resource-group
-```
-
-## Next steps
-For more information about connecting to Azure Germany, see the following resources:
-
-* [Connect to Azure Germany by using PowerShell](./germany-get-started-connect-with-ps.md)
-* [Connect to Azure Germany by using Visual Studio](./germany-get-started-connect-with-vs.md)
-* [Connect to Azure Germany by using the Azure portal](./germany-get-started-connect-with-portal.md)
germany Germany Get Started Connect With Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-get-started-connect-with-portal.md
- Title: Connect to Azure Germany by using the Azure portal | Microsoft Docs
-description: Information on managing your subscription in Azure Germany by using the portal
- Previously updated : 10/16/2020------
-# Connect to Azure Germany by using the Azure portal
--
-The [Azure portal](https://portal.microsoftazure.de/) is the primary way that most people connect to Azure Germany.
-
-> [!NOTE]
-> Only the new portal (the one that uses the Azure Resource Manager deployment model) is available for Azure Germany.
->
-
-You can create subscriptions for your account by going to the [account home page](https://account.windowsazure.de).
-
-## Next steps
-For more information about connecting to Azure Germany, see the following resources:
-
-* [Connect to Azure Germany by using PowerShell](./germany-get-started-connect-with-ps.md)
-* [Connect to Azure Germany by using Azure CLI](./germany-get-started-connect-with-cli.md)
-* [Connect to Azure Germany by using Visual Studio](./germany-get-started-connect-with-vs.md)
----
germany Germany Get Started Connect With Ps https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-get-started-connect-with-ps.md
- Title: Connect to Azure Germany by using PowerShell | Microsoft Docs
-description: Information on managing your subscription in Azure Germany by using PowerShell
- Previously updated : 10/16/2020------
-# Connect to Azure Germany by using PowerShell
--
-To use Azure PowerShell with Azure Germany, you need to connect to Azure Germany instead of global Azure. You can use Azure PowerShell to manage a large subscription through a script or to access features that are not currently available in the Azure portal. If you have used PowerShell in global Azure, it's mostly the same. The differences in Azure Germany are:
-
-* Connecting your account
-* Region names
--
-> [!NOTE]
-> If you have not used PowerShell yet, check out [Introduction to Azure PowerShell](/powershell/azure/).
-
-When you start PowerShell, you have to tell Azure PowerShell to connect to Azure Germany by specifying an environment parameter. The parameter ensures that PowerShell is connecting to the correct endpoints. The collection of endpoints is determined when you connect to your account. Different APIs require different versions of the environment switch:
-
-| Connection type | Command |
-| | |
-| [Azure (classic deployment model)](/powershell/azure) commands |`Add-AzureAccount -Environment AzureGermanCloud` |
-| [Azure (Resource Manager deployment model)](/powershell/azure) commands |`Connect-AzAccount -EnvironmentName AzureGermanCloud` |
-| [Azure Active Directory (classic deployment model)](/previous-versions/azure/jj151815(v=azure.100)) commands |`Connect-MsolService -AzureEnvironment AzureGermanyCloud` |
-| [Azure Active Directory (Resource Manager deployment model)](../azure-resource-manager/management/deployment-models.md) commands |`Connect-AzureAD -AzureEnvironmentName AzureGermanyCloud` |
-
-You can also use the `Environment` switch when connecting to a storage account by using `New-AzStorageContext`, and then specify `AzureGermanCloud`.
-
-## Determining region
-After you're connected, there is one more difference: the regions that are used to target a service. Every Azure cloud service has different regions. You can see them listed on the service availability page. You normally use the region in the `Location` parameter for a command.
--
-| Common name | Display name | Location name |
-| | | |
-| Germany Central |`Germany Central` | `germanycentral` |
-| Germany Northeast |`Germany Northeast` | `germanynortheast` |
--
-> [!NOTE]
-> As is true with PowerShell for global Azure, you can use either the display name or the location name for the `Location` parameter.
->
->
-
-If you ever want to validate the available regions in Azure Germany, you can run the following commands and print the current list. For classic deployments, use the first command. For Resource Manager deployments, use the second command.
-
-```azurepowershell
-Get-AzureLocation
-Get-AzLocation
-```
-
-If you're curious about the available environments across Azure, you can run:
-
-```azurepowershell
-Get-AzureEnvironment
-Get-AzEnvironment
-```
-
-## Next steps
-For more information about connecting to Azure Germany, see the following resources:
-
-* [Connect to Azure Germany by using Azure CLI](./germany-get-started-connect-with-cli.md)
-* [Connect to Azure Germany by using Visual Studio](./germany-get-started-connect-with-vs.md)
-* [Connect to Azure Germany by using the Azure portal](./germany-get-started-connect-with-portal.md)
germany Germany Get Started Connect With Vs https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-get-started-connect-with-vs.md
- Title: Connect to Azure Germany by using Visual Studio | Microsoft Docs
-description: Information on managing your subscription in Azure Germany by using Visual Studio
- Previously updated : 10/16/2020------
-# Connect to Azure Germany by using Visual Studio
--
-Developers use Visual Studio to easily manage their Azure subscriptions while building solutions. Currently, in the Visual Studio user interface, you can't configure a connection to Azure Germany.
-
-## Visual Studio 2017 and Visual Studio 2019
-
-Visual Studio requires a configuration file to connect to Azure Germany. With this file in place, Visual Studio connects to Azure Germany instead of global Azure.
-
-### Create a configuration file for Azure Germany
-
-Create a file named *AadProvider.Configuration.json* with the following content:
-
-```json
-{
- "AuthenticationQueryParameters":null,
- "AsmEndPoint":"https://management.microsoftazure.de/",
- "Authority":"https://login.microsoftonline.de/",
- "AzureResourceManagementEndpoint":"https://management.microsoftazure.de/",
- "AzureResourceManagementAudienceEndpoints":["https://management.core.cloudapi.de/"],
- "ClientIdentifier":"872cd9fa-d31f-45e0-9eab-6e460a02d1f1",
- "EnvironmentName":"BlackForest",
- "GraphEndpoint":"https://graph.cloudapi.de",
- "MsaHomeTenantId":"f577cd82-810c-43f9-a1f6-0cc532871050",
- "NativeClientRedirect":"urn:ietf:wg:oauth:2.0:oob",
- "PortalEndpoint":"https://portal.core.cloudapi.de/",
- "ResourceEndpoint":"https://management.microsoftazure.de/",
- "ValidateAuthority":true,
- "VisualStudioOnlineEndpoint":"https://app.vssps.visualstudio.com/",
- "VisualStudioOnlineAudience":"499b84ac-1321-427f-aa17-267ca6975798"
-}
-```
-
-### Update Visual Studio for Azure Germany
-
-1. Close Visual Studio.
-1. Place *AadProvider.Configuration.json* in *%localappdata%\\.IdentityService\AadConfigurations*. Create this folder if it isn't present.
-1. Start Visual Studio and begin using your Azure Germany account.
-
-> [!NOTE]
-> With the configuration file, only Azure Germany subscriptions are accessible. You still see subscriptions that you configured previously, but they don't work because Visual Studio is now connected to Azure Germany instead of global Azure. To connect to global Azure, remove the file.
->
-
-### Revert a Visual Studio connection to Azure Germany
-
-To enable Visual Studio to connect to global Azure, you need to remove the configuration file that enables the connection to Azure Germany.
-
-1. Close Visual Studio.
-1. Delete or rename the *%localappdata%\.IdentityService\AadConfigurations* folder.
-1. Restart Visual Studio and begin using your global Azure account.
-
-> [!NOTE]
-> After you revert this configuration, your Azure Germany subscriptions are no longer accessible.
->
-
-## Visual Studio 2015
-
-Visual Studio 2015 requires a registry change to connect to Azure Germany. After you set this registry key, Visual Studio connects to Azure Germany instead of global Azure.
-
-### Update Visual Studio 2015 for Azure Germany
-
-To enable Visual Studio to connect to Azure Germany, you need to update the registry.
-
-1. Close Visual Studio.
-1. Create a text file named *VisualStudioForAzureGermany.reg*.
-1. Copy and paste the following text into *VisualStudioForAzureGermany.reg*:
-
-```
-Windows Registry Editor Version 5.00
-
-[HKEY_CURRENT_USER\Software\Microsoft\VSCommon\ConnectedUser]
-"AadInstance"="https://login.microsoftonline.de/"
-"adaluri"="https://management.microsoftazure.de"
-"AzureRMEndpoint"="https://management.microsoftazure.de"
-"AzureRMAudienceEndpoint"="https://management.core.cloudapi.de"
-"EnableAzureRMIdentity"="true"
-"GraphUrl"="graph.cloudapi.de"
-"AadApplicationTenant"="f577cd82-810c-43f9-a1f6-0cc532871050"
-```
-
-1. Save and then run the file by double-clicking it. You're prompted to merge the file into your registry.
-1. Start Visual Studio and begin using [Cloud Explorer](/visualstudio/azure/vs-azure-tools-resources-managing-with-cloud-explorer) with your Azure Germany account.
-
-> [!NOTE]
-> After this registry key is set, only Azure Germany subscriptions are accessible. You still see subscriptions that you configured previously, but they don't work because Visual Studio is now connected to Azure Germany instead of global Azure. To connect to global Azure, revert the changes.
->
-
-### Revert a Visual Studio 2015 connection to Azure Germany
-
-To enable Visual Studio to connect to global Azure, you need to remove the registry settings that enable the connection to Azure Germany.
-
-1. Close Visual Studio.
-1. Create a text file named *VisualStudioForAzureGermany_Remove.reg*.
-1. Copy and paste the following text into *VisualStudioForAzureGermany_Remove.reg*:
-
-```
-Windows Registry Editor Version 5.00
-
-[HKEY_CURRENT_USER\Software\Microsoft\VSCommon\ConnectedUser]
-"AadInstance"=-
-"adaluri"=-
-"AzureRMEndpoint"=-
-"AzureRMAudienceEndpoint"=-
-"EnableAzureRMIdentity"=-
-"GraphUrl"=-
-```
-
-1. Save and then run the file by double-clicking it. You're prompted to merge the file into your registry.
-1. Start Visual Studio.
-
-> [!NOTE]
-> After you revert this registry key, your Azure Germany subscriptions appear but are not accessible. You can safely remove them.
->
-
-## Next steps
-
-For more information about connecting to Azure Germany, see the following resources:
-
-* [Connect to Azure Germany by using PowerShell](./germany-get-started-connect-with-ps.md)
-* [Connect to Azure Germany by using Azure CLI](./germany-get-started-connect-with-cli.md)
-* [Connect to Azure Germany by using the Azure portal](./germany-get-started-connect-with-portal.md)
germany Germany Howto Deploy Webandmobile https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-howto-deploy-webandmobile.md
- Title: Deploy an Azure App Service app by using Visual Studio 2015 | Microsoft Docs
-description: This article describes how to deploy an API app, web app, or mobile app to Azure Germany by using Visual Studio 2015 and the Azure SDK.
- Previously updated : 10/16/2020------
-# Deploy an Azure App Service app by using Visual Studio 2015
--
-This article describes how to deploy an Azure App Service app (API app, web app, or mobile app) to Azure Germany by using Visual Studio 2015.
-
-## Prerequisites
-* See [Visual Studio prerequisites](../app-service/quickstart-dotnetcore.md#prerequisites) to install and configure Visual Studio 2015 and the Azure SDK.
-* Follow [these instructions](./germany-get-started-connect-with-vs.md) to configure Visual Studio to connect to an Azure Germany account.
-
-## Open an app project in Visual Studio
-Open an existing app solution or project in Visual Studio or [create a project](../app-service/quickstart-dotnetcore.md?tabs=netframework48#create-an-aspnet-web-app). Then, run the app in Visual Studio to make sure it works locally.
-
-## Deploy to Azure Germany
-After Visual Studio is configured to connect to your Azure Germany account (which you already did as a prerequisite), [instructions to deploy an App Service app](../app-service/quickstart-dotnetcore.md) are exactly same as they are for global Azure.
-
-## Next steps
-* [Deploy an ASP.NET web app to Azure App Service by using Visual Studio](../app-service/quickstart-dotnetcore.md).
-* For general App Service information, see [App Service - API Apps documentation](../app-service/index.yml).
-* For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Image Gallery https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-image-gallery.md
- Title: Azure Germany images | Microsoft Docs
-description: This article provides an overview of the images included in the Azure Germany Marketplace
- Previously updated : 10/16/2020------
-# Azure Germany images
--
-## Overview
-When deploying new virtual servers in Azure Germany, customers can choose to deploy prebuilt images from Microsoft or upload their own VHDs. This flexibility means that you can deploy your own standardized images if needed.
-
-## Variations
-Marketplace images from our partners are not yet supported in Azure Germany.
-
-Following is a list of available images in the Azure Germany Marketplace. Note that each image might have different versions that aren't listed here. Some of the prebuilt images include pay-as-you-go licensing for specific software. Please review the [Azure pricing](https://azure.microsoft.com/pricing/) page for more guidance and work with your Microsoft account team or reseller.
-
-## Images for deployments with Azure Resource Manager
-
-|Publisher|Offer|SKU|Versions|
-| | | | |
-| Canonical | UbuntuServer | 12.04.5-LTS | 12.04.201603150 12.04.201605160 12.04.201608290 12.04.201610201 12.04.201701100 |
-| Canonical | UbuntuServer | 14.04.4-LTS | 14.04.201604060 14.04.201605160 14.04.201608300 |
-| Canonical | UbuntuServer | 14.04.5-LTS | 14.04.201610200 14.04.201701100 |
-| Canonical | UbuntuServer | 16.04-LTS | 16.04.201701130 |
-| Canonical | UbuntuServer | 16.04.0-LTS | 16.04.201604203 16.04.201605161 16.04.201609071 16.04.201610200 |
-| Canonical | UbuntuServer | 16.10 | 16.10.201701030 |
-| cloudera | cloudera-centos-os | 6_7 | 1.0.1 |
-| CoreOS | Container-Linux | Alpha | |
-| CoreOS | Container-Linux | Beta | |
-| CoreOS | Container-Linux | Stable | |
-| CoreOS | CoreOS | Alpha | 1068.0.0 1081.2.0 1097.0.0 1122.0.0 1151.0.0 1153.0.0 1164.0.0 1164.1.0 1180.0.0 1185.0.0 1192.0.0 1192.1.0 1192.2.0 1207.0.0 1214.0.0 1221.0.0 1235.0.0 1248.0.0 1248.1.0 1262.0.0 1284.1.0 1284.2.0 1298.1.0 1325.1.0 1339.0.0 1353.1.0 |
-| CoreOS | CoreOS | Beta | 1010.4.0 1068.3.0 1081.3.0 1122.1.0 1153.3.0 1153.4.0 1185.1.0 1185.2.0 1192.2.0 1235.1.0 1235.2.0 1248.3.0 1248.4.0 1298.4.0 1325.2.0 1353.2.0 |
-| CoreOS | CoreOS | Stable | 1010.5.0 1010.6.0 1068.10.0 1068.6.0 1068.8.0 1068.9.0 1122.2.0 1122.3.0 1185.3.0 1185.5.0 1235.12.0 1235.5.0 1235.6.0 1235.8.0 1298.5.0 1298.6.0 |
-| credativ | Debian | 7 | 7.0.201604200 7.0.201606240 7.0.201606280 7.0.201609120 7.0.201611020 7.0.201701180 |
-| credativ | Debian | 8 | 8.0.201604200 8.0.201606240 8.0.201606280 8.0.201609120 8.0.201611020 8.0.201701180 8.0.201703150 |
-| credativ | Debian | 8-backports | 8.0.201702060 |
-| credativ | Debian | 9-beta | 9.0.201702080 9.0.201703150 |
-| GE-SRS-Prod-GalleryImages | Process-Server | Windows-2012-R2-Datacenter | 201703.01.00 |
-| MicrosoftOSTC | FreeBSD | 10.3 | 10.3.20170112 |
-| MicrosoftOSTC | FreeBSD | 11.0 | 11.0.20161223 11.0.20170111 |
-| MicrosoftSharePoint | SharePoint2016 | SharePoint_Server_2016_Trial | 16.0.4351 |
-| MicrosoftSQLServer | sql2014sp1-ws2012r2 | Enterprise | 12.0.4100 |
-| MicrosoftSQLServer | SQL2014SP2-WS2012R2 | Enterprise | 12.0.50000 12.0.50001 |
-| MicrosoftSQLServer | SQL2014SP2-WS2012R2 | Express | 12.0.50000 |
-| MicrosoftSQLServer | SQL2014SP2-WS2012R2 | Standard | 12.0.50000 |
-| MicrosoftSQLServer | SQL2014SP2-WS2012R2 | Web | 12.0.50000 |
-| MicrosoftSQLServer | SQL2014SP2-WS2012R2-BYOL | Enterprise | 12.0.50000 |
-| MicrosoftSQLServer | SQL2014SP2-WS2012R2-BYOL | Standard | 12.0.50000 |
-| MicrosoftSQLServer | SQL2016-WS2012R2 | Enterprise | 13.0.31640 |
-| MicrosoftSQLServer | SQL2016-WS2012R2 | Express | 13.0.31641 |
-| MicrosoftSQLServer | SQL2016-WS2012R2 | SQLDEV | 13.0.31640 |
-| MicrosoftSQLServer | SQL2016-WS2012R2 | Standard | 13.0.31640 |
-| MicrosoftSQLServer | SQL2016-WS2012R2 | Web | 13.0.31640 |
-| MicrosoftSQLServer | SQL2016-WS2012R2-BYOL | Enterprise | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016-WS2016 | Enterprise | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016-WS2016 | SQLDEV | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016-WS2016 | Standard | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016-WS2016 | Web | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016-WS2016-BYOL | Enterprise | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016-WS2016-BYOL | Standard | 13.0.21640 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016 | Enterprise | 13.0.400110 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016 | Express | 13.0.400111 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016 | SQLDEV | 13.0.400110 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016 | Standard | 13.0.400110 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016 | Web | 13.0.400110 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016-BYOL | Enterprise | 13.0.400110 |
-| MicrosoftSQLServer | SQL2016SP1-WS2016-BYOL | Standard | 13.0.400110 |
-| MicrosoftVisualStudio | VisualStudio | VS-2015-Comm-AzureSDK-29-WS2012R2 | 2017.02.16 |
-| MicrosoftVisualStudio | VisualStudio | VS-2015-Ent-VSU3-AzureSDK-29-WS2012R2 | 2017.02.16 |
-| MicrosoftVisualStudio | VisualStudio | VS-2017-Ent-WS2016 | 2017.03.06 |
-| MicrosoftVisualStudio | VisualStudio | VS-2017-RC3-Comm-WS2016 | 2017.02.14 |
-| MicrosoftVisualStudio | VisualStudio | VS-2017-RC3-Ent-WS2016 | 2017.02.14 |
-| MicrosoftWindowsServer | WindowsServer | 2008-R2-SP1 | 2.0.20160125 2.0.20160229 2.0.20160430 2.0.20160617 2.0.20160721 2.0.20160812 2.0.20161214 2.0.20170110 2.0.20170316 2.127.20170406 |
-| MicrosoftWindowsServer | WindowsServer | 2012-Datacenter | 3.0.20160125 3.0.20160229 3.0.20160430 3.0.20160617 3.0.20160721 3.0.20160812 3.0.20161214 3.0.20170111 3.0.20170316 3.127.20170406 |
-| MicrosoftWindowsServer | WindowsServer | 2012-R2-Datacenter | 4.0.20160125 4.0.20160229 4.0.20160430 4.0.20160617 4.0.20160721 4.0.20160812 4.0.20161012 4.0.20161214 4.0.20170111 4.0.20170316 4.127.20170406 |
-| MicrosoftWindowsServer | WindowsServer | 2016-Datacenter | 2016.0.20161010 2016.0.20161213 2016.0.20170314 2016.127.20170406 |
-| MicrosoftWindowsServer | WindowsServer | 2016-Datacenter-Server-Core | 2016.0.20170314 2016.127.20170406 |
-| MicrosoftWindowsServer | WindowsServer | 2016-Datacenter-with-Containers | 2016.0.20161012 2016.0.20161025 2016.0.20161213 2016.0.20170314 2016.127.20170406 |
-| MicrosoftWindowsServer | WindowsServer | 2016-Nano-Server | 2016.0.20161012 2016.0.20161109 |
-| MicrosoftWindowsServer | WindowsServer | 2016-Nano-Server-Technical-Preview | |
-| MicrosoftWindowsServer | WindowsServer | 2016-Technical-Preview-with-Containers | |
-| MicrosoftWindowsServer | WindowsServer | Windows-Server-Technical-Preview | |
-| OpenLogic | CentOS | 6.5 | 6.5.20170207 |
-| OpenLogic | CentOS | 6.7 | 6.7.20160310 |
-| OpenLogic | CentOS | 6.8 | 6.8.20160620 6.8.20161026 6.8.20170105 |
-| OpenLogic | CentOS | 7.2 | 7.2.20160325 7.2.20160620 7.2.20161026 7.2.20161116 7.2.20170105 |
-| OpenLogic | CentOS | 7.3 | 7.3.20161221 |
-| RedHat | RHEL | 6.8 | 6.8.20161028 6.8.20161214 6.8.20170224 6.8.2017032020 |
-| RedHat | RHEL | 6.9 | 6.9.2017032807 |
-| RedHat | RHEL | 7.2 | 7.2.20161026 7.2.20170203 7.2.20170224 7.2.2017032020 |
-| RedHat | RHEL | 7.3 | 7.3.20161104 7.3.20170202 7.3.20170224 7.3.2017032020 |
-| RedHat | RHEL-SAP-APPS | 6.8 | 6.8.201703130 |
-| RedHat | RHEL-SAP-APPS | 7.3 | 7.3.201703130 |
-| RedHat | RHEL-SAP-HANA | 6.7 | 6.7.20170310 |
-| RedHat | RHEL-SAP-HANA | 7.2 | 7.2.20170310 |
-| SUSE | Infrastructure | SMT | |
-| SUSE | openSUSE-Leap | 42.1 | 2016.04.15 2016.11.21 |
-| SUSE | openSUSE-Leap | 42.2 | 2017.01.24 2017.03.20 |
-| SUSE | SLES | 11-SP4 | 2016.03.01 2016.08.12 2016.10.21 2016.12.21 2017.03.20 |
-| SUSE | SLES | 12-SP1 | 2016.03.01 2016.08.11 2016.10.21 |
-| SUSE | SLES | 12-SP2 | 2016.11.03 2017.03.20 |
-| SUSE | SLES-BYOS | 11-SP4 | 2016.09.16 2017.03.20 |
-| SUSE | SLES-BYOS | 12-SP2 | 2016.11.03 2017.03.20 |
-| SUSE | SLES-SAP-BYOS | 12-SP1 | 2016.12.16 2017.03.20 |
-| SUSE | SLES-SAP-BYOS | 12-SP2 | 2016.11.04 2017.03.20 |
-| SUSE | SLES-SAPCAL | 11-SP4 | |
-| SUSE | SUSE-Manager-Proxy-BYOS | 3.0 | 2017.03.20 |
-| SUSE | SUSE-Manager-Server-BYOS | 3.0 | 2017.03.20 |
--
-## Images for deployments with Azure Service Manager ("classic")
-| Publisher | Image Name | OS |
-| | | |
-| Credativ | Debian 7 "Wheezy" | Linux |
-| Credativ | Debian 8 "Jessie" | Linux |
-| Credativ | Debian 8 "Jessie" with backports | Linux |
-| Credativ | Debian 9 "Stretch" | Linux |
-| Visual Studio | Visual Studio Community 2015 Update 3 with Azure SDK 2.9 on Windows Server 2012 R2 | Windows |
-| Visual Studio | Visual Studio Enterprise 2015 Update 3 with Azure SDK 2.9 on Windows Server 2012 R2 | Windows |
-| Visual Studio | Visual Studio Enterprise 2017 on Windows Server 2016 (x64) | Windows |
-| Visual Studio | Visual Studio Community 2017 RC on Windows Server 2016 (x64) | Windows |
-| Visual Studio | Visual Studio Enterprise 2017 RC on Windows Server 2016 (x64) | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, January 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, February 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, April 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, June 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, July 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, August 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, December 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, March 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2008 R2 SP1, April 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, January 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, February 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, April 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, June 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, July 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, August 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, December 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, March 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 Datacenter, April 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, January 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, February 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, April 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, June 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, July 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, August 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, October 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, December 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, March 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2012 R2 Datacenter, April 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter, October 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter, December 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter, March 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter, April 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter - Server Core, March 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter - Server Core, April 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 - Nano Server, October 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 - Nano Server, December 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter with Containers, October 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter with Containers, December 2016 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter with Containers, March 2017 | Windows |
-| Microsoft Windows Server Product Group | Windows Server 2016 Datacenter with Containers, April 2017 | Windows |
-| SUSE | SUSE Manager 3.0 Proxy (Bring Your Own Subscription) | Linux |
-| SUSE | SUSE Manager 3.0 Server (Bring Your Own Subscription) | Linux |
-| SUSE | openSUSE Leap 42.2 | Linux |
-| SUSE | openSUSE Leap 42.1 | Linux |
-| SUSE | SUSE Linux Enterprise Server 11 SP4 (Bring Your Own Subscription) | Linux |
-| SUSE | SUSE Linux Enterprise Server 11 SP4 | Linux |
-| SUSE | SUSE Linux Enterprise Server 12 SP1 | Linux |
-| SUSE | SUSE Linux Enterprise Server 12 SP2 (Bring Your Own Subscription) | Linux |
-| SUSE | SUSE Linux Enterprise Server 12 SP2 | Linux |
-| SUSE | SUSE Linux Enterprise Server for SAP Applications 12 SP1 (Bring Your Own Subscription) | Linux |
-| SUSE | SUSE Linux Enterprise Server for SAP Applications 12 SP1 (Premium Image) (Bring Your Own Subscription) | Linux |
-| SUSE | SUSE Linux Enterprise Server for SAP Applications 12 SP2 (Bring Your Own Subscription) | Linux |
-| SUSE | SUSE Linux Enterprise Server for SAP Applications 12 SP2 (Premium Image) (Bring Your Own Subscription) | Linux |
-| OpenLogic | OpenLogic 6.5 | Linux |
-| OpenLogic | OpenLogic 6.7 | Linux |
-| OpenLogic | OpenLogic 6.8 | Linux |
-| OpenLogic | OpenLogic 7.2 | Linux |
-| OpenLogic | OpenLogic 7.3 | Linux |
-| Microsoft Open Source Technology Center for FreeBSD | FreeBSD 10.3 | Linux |
-| Microsoft Open Source Technology Center for FreeBSD | FreeBSD 11.0 | Linux |
-| Microsoft SharePoint Server Product Group | SharePoint Server 2016 Trial | Windows |
-| RedHat | Red Hat Enterprise Linux 6.7 | Linux |
-| RedHat | Red Hat Enterprise Linux 6.8 | Linux |
-| RedHat | Red Hat Enterprise Linux 6.9 | Linux |
-| RedHat | Red Hat Enterprise Linux 7.2 | Linux |
-| RedHat | Red Hat Enterprise Linux 7.3 | Linux |
-| coreos | CoreOS Alpha | Linux |
-| coreos | CoreOS Beta | Linux |
-| coreos | CoreOS Stable | Linux |
-| Cloudera, Inc. | Cloudera CentOS 6.7 | Linux |
-| Canonical | Ubuntu Server 12.04.5-LTS | Linux |
-| Canonical | Ubuntu Server 14.04.4-LTS | Linux |
-| Canonical | Ubuntu Server 14.04.5-LTS | Linux |
-| Canonical | Ubuntu Server 16.04 LTS | Linux |
-| Canonical | Ubuntu Server 16.10 | Linux |
-| Microsoft SQL Server Product Group | SQL Server 2014 SP2 Enterprise Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2014 SP2 Express on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2014 SP2 Standard on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2014 SP2 Web Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 SP1 Developer on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 SP1 Enterprise on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 SP1 Express on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 SP1 Standard on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 SP1 Web on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 Developer on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 RTM Developer on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 RTM Enterprise on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 RTM Enterprise on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 Express on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 Standard on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 Standard on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 Web on Windows Server 2012 R2 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2016 Web on Windows Server 2016 | Windows |
-| Microsoft SQL Server Product Group | SQL Server 2014 SP1 Enterprise on Windows Server 2012 R2 | Windows |
-| GE-SRS-Prod-GalleryImages | Microsoft Azure Site Recovery Process Server V2 | Windows |
---
-<!--Working with quickstart templates missing here. needs modification of quickstart repo -->
---
-## Next steps
-To uncover any programmatic differences with endpoints when you're working with Azure Germany, see the [Azure Germany developer guide](./germany-developer-guide.md).
-
-For more information on deploying from the Marketplace or creating your own VHD, see these resources:
-
-* [Deploying a Windows virtual machine](../virtual-machines/windows/quick-create-portal.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
-* [Windows virtual machines FAQ](../virtual-machines/windows/faq.yml?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
-* [Create a Linux VM custom image](../virtual-machines/linux/create-upload-generic.md?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
----
germany Germany Manage Subscriptions https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-manage-subscriptions.md
- Title: Azure Germany subscriptions | Microsoft Docs
-description: This article provides information on managing your subscription in Azure Germany.
 Previously updated : 10/16/2020
-# Manage and connect to your subscription in Azure Germany
--
-Azure Germany has unique URLs and endpoints for managing your environment. It's important to use the right connections to manage your environment through the Azure portal or PowerShell. After you connect to the Azure Germany environment, the normal operations for managing a service work if the component has been deployed.
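-
-For example, when you use PowerShell, you must target the Azure Germany environment explicitly when you sign in. The following is a minimal sketch, assuming the Az PowerShell module is installed:
-
-```powershell
-# Sign in to the Azure Germany environment instead of the default public cloud.
-Connect-AzAccount -Environment AzureGermanCloud
-
-# List the environment-specific endpoints (portal, Resource Manager, and so on).
-Get-AzEnvironment -Name AzureGermanCloud
-```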
-
-> [!IMPORTANT]
->
->This applies to Azure customers in Microsoft Cloud Germany who purchase Azure services directly from Microsoft with pay-as-you-go pricing.
->
->Effective September 14, 2019, a new European Union mandate took effect that requires customer credit card payments to go through a strong customer authentication (SCA) challenge. As a result, customers on pay-as-you-go pricing are unable to create new subscriptions or add or update payment information, until they [migrate to a new Azure region](./germany-migration-main.md).
->
->
-
-## Manage subscription
-You can manage your Azure resources and view subscription billing information in the [Azure portal](https://portal.microsoftazure.de).
-
-## Manage account
-To manage your account information, such as your contact info, visit the [Account center](https://account.windowsazure.de).
-
-## Next steps
-Learn how to [migrate to a new Azure region](./germany-migration-main.md).
----
germany Germany Migration Analytics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-analytics.md
- Title: Migrate Azure analytics resources, Azure Germany to global Azure
-description: This article provides information about migrating your Azure analytics resources from Azure Germany to global Azure.
 Previously updated : 10/16/2020
-# Migrate analytics resources to global Azure
---
-This article has information that can help you migrate Azure analytics resources from Azure Germany to global Azure.
-
-## Event Hubs
-
-You can't directly migrate Azure Event Hubs resources from Azure Germany to global Azure. The Event Hubs service doesn't have data export or import capabilities. However, you can export Event Hubs resources [as a template](../azure-resource-manager/templates/export-template-portal.md). Then, adapt the exported template for global Azure and re-create the resources.
-
-> [!NOTE]
-> Exporting an Event Hubs template doesn't copy data (for example, messages). Exporting a template only re-creates Event Hubs metadata.
-
-> [!IMPORTANT]
-> Change location, Azure Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
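-
-You can also export the template from PowerShell instead of the portal. The following is a minimal sketch, assuming the Az PowerShell module and a placeholder resource group name:
-
-```powershell
-# Export the template for the resource group that contains the Event Hubs namespace
-# (run while signed in to the Azure Germany subscription), then adapt it for the target region.
-Export-AzResourceGroup -ResourceGroupName "my-eventhubs-rg" -Path "C:\temp\eventhubs-template.json"
-```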
-
-### Event Hubs metadata
-
-The following metadata elements are re-created when you export an Event Hubs template:
-
-- Namespaces
-- Event hubs
-- Consumer groups
-- Authorization rules
-
-For more information:
-
-- Review the [Event Hubs overview](../event-hubs/event-hubs-about.md).
-- Refresh your knowledge by completing the [Event Hubs tutorials](../event-hubs/index.yml).
-- Check the migration steps for [Azure Service Bus](./germany-migration-integration.md#service-bus).
-- Become familiar with how to [export Azure Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md) or read the overview of [Azure Resource Manager](../azure-resource-manager/management/overview.md).
-
-## HDInsight
-
-To migrate Azure HDInsight clusters from Azure Germany to global Azure:
-
-1. Stop the HDInsight cluster.
-2. Migrate the data in the Azure Storage account to the new region by using AzCopy or a similar tool (see the sketch after these steps).
-3. Create new compute resources in global Azure, and then attach the migrated storage resources as the primary attached storage.
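-
-For step 2, the following is a minimal sketch, assuming AzCopy v10 is installed and SAS tokens have been generated for both storage accounts (the account names are placeholders):
-
-```powershell
-# Copy a container from the Azure Germany storage account (core.cloudapi.de endpoint)
-# to the storage account in the target global Azure region.
-azcopy copy "https://germanyaccount.blob.core.cloudapi.de/data?<source-sas>" `
-            "https://globalaccount.blob.core.windows.net/data?<target-sas>" --recursive
-```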
-
-For more specialized, long-running clusters (Kafka, Spark streaming, Storm, or HBase), we recommend that you orchestrate the transition of workloads to the new region.
-
-For more information:
-
-- Review the [Azure HDInsight documentation](../hdinsight/index.yml).
-- Refresh your knowledge by completing the [HDInsight tutorials](../hdinsight/index.yml).
-- For help with [scaling HDInsight clusters](../hdinsight/hdinsight-administer-use-powershell.md#scale-clusters), see [Administer HDInsight by using PowerShell](../hdinsight/hdinsight-administer-use-powershell.md).
-- Learn how to use [AzCopy](../storage/common/storage-use-azcopy-v10.md).
-
-## Stream Analytics
-
-To migrate Azure Stream Analytics services from Azure Germany to global Azure, manually re-create the entire setup in a global Azure region either by using the Azure portal or by using PowerShell. Ingress and egress sources for a Stream Analytics job can be in any region.
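-
-As a sketch of the re-creation step, assuming the Az.StreamAnalytics module and a job definition JSON file that you've re-created from your Azure Germany job (the names are placeholders):
-
-```powershell
-# Re-create the Stream Analytics job in the target global Azure region
-# from a job definition file.
-New-AzStreamAnalyticsJob -ResourceGroupName "target-rg" -Name "myasajob" -File "C:\temp\JobDefinition.json"
-```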
-
-For more information:
-
-- Refresh your knowledge by completing the [Stream Analytics tutorials](../stream-analytics/stream-analytics-real-time-fraud-detection.md).
-- Review the [Stream Analytics overview](../stream-analytics/stream-analytics-introduction.md).
-- Learn how to [create a Stream Analytics job by using PowerShell](../stream-analytics/stream-analytics-quick-create-powershell.md).
-
-## SQL Database
-
-To migrate smaller Azure SQL Database workloads, use the export function to create a BACPAC file. A BACPAC file is a compressed (zipped) file that contains metadata and the data from the SQL Server database. After you create the BACPAC file, you can copy the file to the target environment (for example, by using AzCopy) and use the import function to rebuild the database. Be aware of the following considerations:
-
-- For an export to be transactionally consistent, make sure that one of the following conditions is true:
- - No write activity occurs during the export.
- - You export from a transactionally consistent copy of your SQL database.
-- To export to Azure Blob storage, the BACPAC file size is limited to 200 GB. For a larger BACPAC file, export to local storage.
-- If the export operation from SQL Database takes longer than 20 hours, the operation might be canceled. Check the following articles for tips about how to increase performance.
-
-> [!NOTE]
-> The connection string changes after the export operation because the DNS name of the server changes during export.
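-
-As an alternative to the portal, you can drive the export and import operations from PowerShell. The following is a minimal sketch, assuming the Az.Sql module; the server, database, and storage values are placeholders:
-
-```powershell
-# Export the database in Azure Germany to a BACPAC file in Blob storage.
-New-AzSqlDatabaseExport -ResourceGroupName "source-rg" -ServerName "germanyserver" -DatabaseName "mydb" `
-    -StorageKeyType "StorageAccessKey" -StorageKey "<storage-key>" `
-    -StorageUri "https://germanyaccount.blob.core.cloudapi.de/bacpacs/mydb.bacpac" `
-    -AdministratorLogin "<admin>" -AdministratorLoginPassword (Read-Host -AsSecureString)
-
-# After copying the BACPAC file to the target environment, import it into the new server.
-New-AzSqlDatabaseImport -ResourceGroupName "target-rg" -ServerName "globalserver" -DatabaseName "mydb" `
-    -StorageKeyType "StorageAccessKey" -StorageKey "<storage-key>" `
-    -StorageUri "https://globalaccount.blob.core.windows.net/bacpacs/mydb.bacpac" `
-    -AdministratorLogin "<admin>" -AdministratorLoginPassword (Read-Host -AsSecureString) `
-    -Edition "Standard" -ServiceObjectiveName "S2" -DatabaseMaxSizeBytes 50GB
-```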
-
-For more information:
-
-- Learn how to [export a database to a BACPAC file](../azure-sql/database/database-export.md).
-- Learn how to [import a BACPAC file to a database](../azure-sql/database/database-import.md).
-- Review the [Azure SQL Database documentation](/azure/sql-database/).
-
-## Analysis Services
-
-To migrate your Azure Analysis Services models from Azure Germany to global Azure, use [backup and restore operations](../analysis-services/analysis-services-backup.md).
-
-If you want to migrate only the model metadata and not the data, an alternative is to [redeploy the model from SQL Server Data Tools](../analysis-services/analysis-services-deploy.md).
-
-For more information:
-
-- Learn about [Analysis Services backup and restore](../analysis-services/analysis-services-backup.md).
-- Review the [Analysis Services overview](../analysis-services/analysis-services-overview.md).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Compute](./germany-migration-compute.md)
-- [Networking](./germany-migration-networking.md)
-- [Storage](./germany-migration-storage.md)
-- [Web](./germany-migration-web.md)
-- [Databases](./germany-migration-databases.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Security](./germany-migration-security.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Migration Compute https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-compute.md
- Title: Migrate Azure compute resource from Azure Germany to global Azure
-description: This article provides information about migrating your Azure compute resources from Azure Germany to global Azure.
 Previously updated : 10/16/2020
-# Migrate compute resources to global Azure
--
-This article has information that can help you migrate Azure compute resources from Azure Germany to global Azure.
-
-## Compute IaaS
-
-You can't directly migrate Azure compute infrastructure as a service (IaaS) resources from Azure Germany to global Azure. But, there are multiple ways you can "duplicate" your VMs.
-
-### Duplicate by using Site Recovery
-
-Azure Site Recovery can help you migrate your VMs from Azure Germany to global Azure. Because the source and target are in different tenants in a migration from Azure Germany to global Azure, you can't use the normal Azure Disaster Recovery option that's available for VMs. The trick is to set up a Site Recovery vault in the target environment (global Azure) and to proceed like you're moving a physical server to Azure. In the Azure portal, select a replication path labeled **Not virtualized**. When the replication is finished, do a failover.
-
-> [!NOTE]
-> The following steps are the same steps you would take to migrate a physical server that's running on-premises to Azure.
-
-To learn more, review this [helpful Site Recovery tutorial](../site-recovery/physical-azure-disaster-recovery.md). For a quick overview, here's a shorter and slightly adapted version of the process:
-
-Install a configuration/process server in your source environment to build the server images. Then, replicate the images to the Azure Recovery Services vault in your target environment. The work is all done by the configuration server. You don't need to touch the individual servers.
-
-1. Sign in to the Azure Germany portal.
-1. Compare the OS versions of the VMs you want to migrate against the [support matrix](../site-recovery/vmware-physical-secondary-support-matrix.md).
-1. Set up a new VM in your source Azure Virtual Network instance to act as the configuration server:
- 1. Select **DS4v3** or higher (4 to 8 cores, 16-GB memory).
- 1. Attach an additional disk that has at least 1 TB of available space (for the VM images).
- 1. Use Windows Server 2012 R2 or later.
-1. Make sure that ports 443 and 9443 are open for the subnet in both directions.
-1. Sign in to the new VM (ConfigurationServer).
-1. In your remote desktop session, sign in to the global Azure portal by using your global Azure credentials.
-1. Set up a virtual network in which the replicated VMs will run.
-1. Create an Azure Storage account.
-1. Set up the Recovery Services vault.
-1. Define **Protection goal** (**To Azure** > **Not virtualized/other**).
-1. Download the Recovery Unified Setup installation file (**Prepare Infrastructure** > **Source**). When you open the portal URL from within ConfigurationServer, the file is downloaded to the correct server. From outside ConfigurationServer, upload the installation file to ConfigurationServer.
-1. Download the vault registration key (upload it to ConfigurationServer like in the preceding step, if necessary).
-1. Run the Recovery Unified Setup installation on ConfigurationServer.
-1. Set up the target environment (check that you're still signed in to the target portal).
-1. Define the replication policy.
-1. Start replication.
-
-After replication initially succeeds, test the scenario by doing a test failover. Verify and delete the test. Your final step is to do the real failover.
-
-> [!CAUTION]
-> Syncing back to the source VM doesn't occur. If you want to migrate again, clean up everything and start again at the beginning!
-
-### Duplicate by using Resource Manager template export/import
-
-You can export the Azure Resource Manager template that you use to deploy to your local machine. Edit the template to change the location and other parameters or variables. Then, redeploy in global Azure.
-
-> [!IMPORTANT]
-> Change location, Azure Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
-
-Export the Resource Manager template in the portal by selecting the resource group. Select **deployments**, and then select the most recent deployment. Select **Template** in the left menu and download the template.
-
-A .zip file that has several files in it downloads. The PowerShell, Azure CLI, Ruby, or .NET scripts help you deploy your template. The file *parameters.json* has all the input from the last deployment. It's likely that you will need to change some settings in this file. Edit the *template.json* file if you want to redeploy only a subset of the resources.
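-
-As a sketch of the redeployment step, assuming the Az PowerShell module and that you've already edited *template.json* and *parameters.json* for the target region:
-
-```powershell
-# Sign in to global Azure (the default environment), create the target resource group,
-# and redeploy the edited template.
-Connect-AzAccount
-New-AzResourceGroup -Name "target-rg" -Location "westeurope"
-New-AzResourceGroupDeployment -ResourceGroupName "target-rg" `
-    -TemplateFile ".\template.json" -TemplateParameterFile ".\parameters.json"
-```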
-
-For more information:
-
-- Refresh your knowledge by completing the [Site Recovery tutorials](../site-recovery/index.yml).
-- Get information about how to [export Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md) or read an overview of [Azure Resource Manager](../azure-resource-manager/management/overview.md).
-- Learn more about [physical-to-Azure disaster recovery by using Site Recovery](../site-recovery/physical-azure-disaster-recovery.md).
-- Read the [overview of Azure locations](https://azure.microsoft.com/global-infrastructure/locations/).
-- Learn more about how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).
-
-## Cloud Services
-
-You can redeploy Azure Cloud Services resources by providing the `.cspkg` and `.cscfg` definitions again.
-
-### Azure portal
-
-To redeploy cloud services in the Azure portal:
-
-1. [Create a new cloud service](../cloud-services/cloud-services-how-to-create-deploy-portal.md) by using your `.cspkg` and `.cscfg` definitions.
-1. Update the [CNAME or A record](../cloud-services/cloud-services-custom-domain-name-portal.md) to point traffic to the new cloud service.
-1. When traffic points to the new cloud service, delete the old cloud service in Azure Germany.
-
-### PowerShell
-
-To redeploy cloud services by using PowerShell:
-
-1. [Create a new cloud service](/powershell/module/servicemanagement/azure.service/new-azureservice) by using your `.cspkg` and `.cscfg` definitions.
-
- ```powershell
- New-AzureService -ServiceName <yourServiceName> -Label <MyTestService> -Location <westeurope>
- ```
-
-1. [Create a new deployment](/powershell/module/servicemanagement/azure.service/new-azuredeployment) by using your `.cspkg` and `.cscfg` definitions.
-
- ```powershell
- New-AzureDeployment -ServiceName <yourServiceName> -Slot <Production> -Package <YourCspkgFile.cspkg> -Configuration <YourConfigFile.cscfg>
- ```
-
-1. Update the [CNAME or A record](../cloud-services/cloud-services-custom-domain-name-portal.md) to point traffic to the new cloud service.
-1. When traffic points to the new cloud service, [delete the old cloud service](/powershell/module/servicemanagement/azure.service/remove-azureservice) in Azure Germany.
-
- ```powershell
- Remove-AzureService -ServiceName <yourOldServiceName>
- ```
-
-### REST API
-
-To redeploy cloud services by using the REST API:
-
-1. [Create a new cloud service](/rest/api/compute/cloudservices/rest-create-cloud-service) in the target environment.
-
- ```http
- https://management.core.windows.net/<subscription-id>/services/hostedservices
- ```
-
-1. Create a new deployment by using the [Create Deployment API](/previous-versions/azure/reference/ee460813(v=azure.100)). To find your `.cspkg` and `.cscfg` definitions, you can call the [Get Package API](/previous-versions/azure/reference/jj154121(v=azure.100)).
-
- ```http
- https://management.core.windows.net/<subscription-id>/services/hostedservices/<cloudservice-name>/deploymentslots/production
- ```
-
-1. When traffic points to the new cloud service, [delete the old cloud service](/rest/api/compute/cloudservices/rest-delete-cloud-service) in Azure Germany.
-
- ```http
- https://management.core.cloudapi.de/<subscription-id>/services/hostedservices/<old-cloudservice-name>
- ```
-
-For more information:
-
-- Review the [Cloud Services overview](../cloud-services/cloud-services-choose-me.md).
-
-## Service Fabric
-
-You can't migrate Azure Service Fabric resources from Azure Germany to global Azure. You must redeploy Service Fabric resources in the new environment.
-
-You can get information about your current Service Fabric environment by using PowerShell cmdlets. Access all cmdlets that are related to Service Fabric by entering `Get-Help *ServiceFabric*` in PowerShell.
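-
-For example, to capture the configuration of the existing cluster before you re-create it, a minimal sketch (assuming the Az.ServiceFabric module; the resource group name is a placeholder):
-
-```powershell
-# Document the current cluster configuration in Azure Germany for reference.
-Get-AzServiceFabricCluster -ResourceGroupName "my-sf-rg" | Format-List *
-```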
-
-For more information:
-
-- Refresh your knowledge by completing the [Service Fabric tutorials](../service-fabric/service-fabric-tutorial-create-dotnet-app.md).
-- Learn how to [create a new cluster](../service-fabric/service-fabric-cluster-creation-via-portal.md).
-- Review the [Service Fabric overview](../service-fabric/service-fabric-overview.md).
-
-## Batch
-
-You can't migrate Azure Batch account data from one region to another. The account might have running VMs associated with it and be actively interacting with data in storage accounts, databases, or other storage systems.
-
-Redeploy your deployment scripts, templates, or code in the new region. Redeployment includes the following tasks:
-
-1. [Create a Batch account](../batch/batch-account-create-portal.md) (see the sketch after these steps).
-1. [Increase your Batch account quota](../batch/batch-quota-limit.md).
-1. Create Batch pools.
-1. Create new storage accounts, databases, and other services that are used to persist input and output data.
-1. Update your configuration and code to point to the new Batch account and use new credentials.
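-
-For step 1, the following is a minimal sketch, assuming the Az.Batch module; the account name, resource group, and region are placeholders:
-
-```powershell
-# Create the Batch account in the target global Azure region.
-New-AzBatchAccount -AccountName "mybatchaccount" -ResourceGroupName "batch-rg" -Location "westeurope"
-```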
-
-For more information:
-
-- Refresh your knowledge by completing the [Batch tutorials](../batch/tutorial-parallel-dotnet.md).
-- Review the [Azure Batch overview](../batch/batch-technical-overview.md).
-
-## Functions
-
-Migrating Azure Functions resources from Azure Germany to global Azure isn't supported at this time. We recommend that you export the Resource Manager template, change the location, and then redeploy to the target region.
-
-> [!IMPORTANT]
-> Change location, Key Vault secrets, certificates, App Settings, and other GUIDs to be consistent with the new region.
-
-For more information:
-
-- Refresh your knowledge by completing the [Functions tutorials](../azure-functions/index.yml).
-- Learn how to [export Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md) or read an overview of [Azure Resource Manager](../azure-resource-manager/management/overview.md).
-- Review the [Azure Functions overview](../azure-functions/functions-overview.md).
-- Get an [overview of Azure locations](https://azure.microsoft.com/global-infrastructure/locations/).
-- Learn how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).
-
-## Virtual machine scale sets
-
-To migrate virtual machine scale sets to global Azure, export the Resource Manager template, adapt it to the new environment, and then redeploy to the target region. Export only the base template and redeploy the template in the new environment. Individual virtual machine scale set instances should all be the same.
-
-> [!IMPORTANT]
-> Change location, Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
-
-For more information:
-
-- Refresh your knowledge by completing the [virtual machine scale set tutorials](../virtual-machine-scale-sets/tutorial-create-and-manage-cli.md).
-- Learn how to [export Azure Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md).
-- Review the [Azure Resource Manager overview](../azure-resource-manager/management/overview.md).
-- Get an overview of [virtual machine scale sets](../virtual-machine-scale-sets/overview.md).
-- Read an [overview of Azure locations](https://azure.microsoft.com/global-infrastructure/locations/).
-- Learn how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).
-
-## Web Apps
-
-Currently, apps that you created by using the Web Apps feature of Azure App Service can't be migrated from Azure Germany to global Azure. We recommend that you export a web app as a Resource Manager template, and then redeploy after you change the location property to the new region.
-
-> [!IMPORTANT]
-> Change location, Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
-
-For more information:
-
-- Refresh your knowledge by completing the [App Service tutorials](../app-service/tutorial-dotnetcore-sqldb-app.md).
-- Learn how to [export Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md) or read an overview of [Azure Resource Manager](../azure-resource-manager/management/overview.md).
-- Review the [App Service overview](../app-service/overview.md).
-- Read the [overview of Azure locations](https://azure.microsoft.com/global-infrastructure/locations/).
-- Learn how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Networking](./germany-migration-networking.md)
-- [Storage](./germany-migration-storage.md)
-- [Web](./germany-migration-web.md)
-- [Databases](./germany-migration-databases.md)
-- [Analytics](./germany-migration-analytics.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Security](./germany-migration-security.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Migration Databases https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-databases.md
- Title: Migrate Azure database resources, Azure Germany to global Azure
-description: This article provides information about migrating your Azure database resources from Azure Germany to global Azure
 Previously updated : 03/29/2021
-# Migrate database resources to global Azure
--
-This article has information that can help you migrate Azure database resources from Azure Germany to global Azure.
-
-## SQL Database
-
-To migrate smaller Azure SQL Database workloads, without keeping the migrated database online, use the export function to create a BACPAC file. A BACPAC file is a compressed (zipped) file that contains metadata and the data from the SQL Server database. After you create the BACPAC file, you can copy the file to the target environment (for example, by using AzCopy) and use the import function to rebuild the database. Be aware of the following considerations:
-
-- For an export to be transactionally consistent, make sure that one of the following conditions is true:
- - No write activity occurs during the export.
- - You export from a transactionally consistent copy of your SQL database.
-- To export to Azure Blob storage, the BACPAC file size is limited to 200 GB. For a larger BACPAC file, export to local storage.
-- If the export operation from SQL Database takes longer than 20 hours, the operation might be canceled. Check the following articles for tips about how to increase performance.
-
-> [!NOTE]
-> The connection string changes after the export operation because the DNS name of the server changes during export.
-
-For more information:
-
-- Learn how to [export a database to a BACPAC file](../azure-sql/database/database-export.md).
-- Learn how to [import a BACPAC file to a database](../azure-sql/database/database-import.md).
-- Review the [Azure SQL Database documentation](/azure/sql-database/).
-
-## Migrate SQL Database using active geo-replication
-
-For databases that are too large for BACPAC files, or to migrate from one cloud to another and remain online with minimum downtime, you can configure active geo-replication from Azure Germany to global Azure.
-
-> [!IMPORTANT]
-> Configuring active geo-replication to migrate databases to global Azure is supported only through Transact-SQL (T-SQL). Before you migrate, you must request that your subscription be enabled for migration to global Azure. To submit the request, use [this support request link](#requesting-access).
-
-> [!Note]
-> The global Azure regions Germany West Central and Germany North are the regions supported for active geo-replication with the Azure Germany cloud. If you want your databases to end up in a different global Azure region, complete the migration to global Azure first, and then configure an additional geo-replication link from Germany West Central or Germany North to the required global Azure region.
-
-For details about active geo-replication costs, see the section titled **Active geo-replication** in [Azure SQL Database pricing](https://azure.microsoft.com/pricing/details/sql-database/single/).
-
-Migrating databases with active geo-replication requires an Azure SQL logical server in global Azure. You can create the server using the portal, Azure PowerShell, Azure CLI, etc., but configuring active geo-replication to migrate from Azure Germany to global Azure is only supported using Transact-SQL (T-SQL).
-
-> [!IMPORTANT]
-> When migrating between clouds, the primary (Azure Germany) and secondary (global Azure) server name prefixes must be different. If the server names are the same, running the ALTER DATABASE statement will succeed, but the migration will fail. For example, if the prefix of the primary server name is `myserver` (`myserver.database.cloudapi.de`), the prefix of the secondary server name in global Azure cannot be `myserver`.
--
-The `ALTER DATABASE` statement lets you specify a target server in global Azure by using its fully qualified DNS name.
--
-```sql
-ALTER DATABASE [sourcedb] add secondary on server [public-server.database.windows.net]
-```
-
-- *`sourcedb`* represents the database name in an Azure SQL server in Azure Germany.
-- *`public-server.database.windows.net`* represents the Azure SQL server name that exists in global Azure, where the database should be migrated. The namespace "database.windows.net" is required; replace *public-server* with the name of your logical SQL server in global Azure. The server in global Azure must have a different name than the primary server in Azure Germany.
-
-The command is executed on the master database on the Azure Germany server hosting the local database to be migrated.
-- The T-SQL start-copy API authenticates the logged-in user in the public cloud server by finding a user with the same SQL login/user name in the master database of that server. This approach is cloud-agnostic; thus, the T-SQL API is used to start cross-cloud copies. For permissions and more information on this topic, see [Creating and using active geo-replication](../azure-sql/database/active-geo-replication-overview.md) and [ALTER DATABASE (Transact-SQL)](/sql/t-sql/statements/alter-database-transact-sql/).
-- Except for the initial T-SQL command extension indicating an Azure SQL logical server in global Azure, the rest of the active geo-replication process is identical to the existing execution in the local cloud. For detailed steps to create active geo-replication, see [Creating and using active geo-replication](../azure-sql/database/active-geo-replication-overview.md), with the exception that the secondary database is created in the secondary logical server created in global Azure.
-- Once the secondary database exists in global Azure (as an online copy of the Azure Germany database), you can initiate a database failover from Azure Germany to global Azure for this database by using the ALTER DATABASE T-SQL command (see the table below).
-- After the failover, once the secondary becomes a primary database in global Azure, you can stop the active geo-replication and remove the secondary database on the Azure Germany side at any time (see the table below and the steps shown in the diagram).
-- After failover, the secondary database in Azure Germany will continue to incur costs until it's deleted.
-
-
-- Using the `ALTER DATABASE` command is the only way to set up active geo-replication to migrate an Azure Germany database to global Azure.
-- No Azure portal, Azure Resource Manager, PowerShell, or CLI support is available to configure active geo-replication for this migration.
-
-To migrate a database from Azure Germany to global Azure:
-
-1. Choose the user database in Azure Germany, for example, `azuregermanydb`
-2. Create a logical server in global Azure (the public cloud), for example, `globalazureserver`.
-Its fully qualified domain name (FQDN) is `globalazureserver.database.windows.net`.
-3. Start active geo-replication from Azure Germany to global Azure by executing this T-SQL command on the server in Azure Germany. Note that the fully qualified DNS name `globalazureserver.database.windows.net` is used for the public server, to indicate that the target server is in global Azure and not Azure Germany.
-
- ```sql
- ALTER DATABASE [azuregermanydb] ADD SECONDARY ON SERVER [globalazureserver.database.windows.net];
- ```
-
-4. When the replication is ready to move the read-write workload to the global Azure server, initiate a planned failover to global Azure by executing this T-SQL command on the global Azure server.
-
- ```sql
- ALTER DATABASE [azuregermanydb] FAILOVER;
- ```
-
-5. The active geo-replication link can be terminated before or after the failover process. Executing the following T-SQL command after the planned failover removes the geo-replication link with the database in global Azure being the read-write copy. It should be run on the current geo-primary database's logical server (i.e. on the global Azure server). This will complete the migration process.
-
- ```sql
- ALTER DATABASE [azuregermanydb] REMOVE SECONDARY ON SERVER [azuregermanyserver];
- ```
-
- The following T-SQL command when executed before the planned failover also stops the migration process, but in this situation the database in Azure Germany will remain the read-write copy. This T-SQL command should also be run on the current geo-primary database's logical server, in this case on the Azure Germany server.
-
- ```sql
- ALTER DATABASE [azuregermanydb] REMOVE SECONDARY ON SERVER [globalazureserver];
- ```
-
-These steps to migrate Azure SQL databases from Azure Germany to global Azure can also be followed using active geo-replication.
--
-The following tables list the T-SQL commands for managing failover that are supported for cross-cloud active geo-replication between Azure Germany and global Azure:
-
-|Command |Description|
-|:--|:--|
-|[ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true) |Use the ADD SECONDARY ON SERVER argument to create a secondary database for an existing database and start data replication. |
-|[ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true) |Use FAILOVER or FORCE_FAILOVER_ALLOW_DATA_LOSS to switch a secondary database to the primary role and initiate failover. |
-|[ALTER DATABASE](/sql/t-sql/statements/alter-database-transact-sql?view=azuresqldb-current&preserve-view=true) |Use REMOVE SECONDARY ON SERVER to terminate data replication between a SQL database and the specified secondary database. |
-
-### Active geo-replication monitoring system views
-
-|Command |Description|
-|:--|:--|
-|[sys.geo_replication_links](/sql/relational-databases/system-dynamic-management-views/sys-geo-replication-links-azure-sql-database?view=azuresqldb-current&preserve-view=true)|Returns information about all existing replication links for each database on the Azure SQL Database server. |
-|[sys.dm_geo_replication_link_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-geo-replication-link-status-azure-sql-database?view=azuresqldb-current&preserve-view=true) |Gets the last replication time, last replication lag, and other information about the replication link for a given SQL database. |
-|[sys.dm_operation_status](/sql/relational-databases/system-dynamic-management-views/sys-dm-operation-status-azure-sql-database?view=azuresqldb-current&preserve-view=true) | Shows the status for all database operations including the status of the replication links. |
-|[sp_wait_for_database_copy_sync](/sql/relational-databases/system-stored-procedures/active-geo-replication-sp-wait-for-database-copy-sync?view=azuresqldb-current&preserve-view=true) | Causes the application to wait until all committed transactions are replicated and acknowledged by the active secondary database. |
-
-
-## Migrate SQL Database long-term retention backups
-
-Migrating a database by using geo-replication or a BACPAC file doesn't copy over the long-term retention backups that the database might have in Azure Germany. To migrate existing long-term retention backups to the target global Azure region, you can use the COPY long-term retention backup procedure.
-
->[!Note]
->LTR backup copy methods documented here can only copy the LTR backups from Azure Germany to global Azure. Copying PITR backups using these methods is not supported.
->
-
-### Prerequisites
-
-1. The target database that you're copying the LTR backups to must exist in global Azure before you start copying the backups. It's recommended that you first migrate the source database using [active geo-replication](#migrate-sql-database-using-active-geo-replication) and then initiate the LTR backup copy. This ensures that the database backups are copied to the correct destination database. This step isn't required if you're copying over LTR backups of a dropped database; in that case, a dummy DatabaseID is created in the target region.
-2. Install this [PowerShell Az Module](https://www.powershellgallery.com/packages/Az.Sql/3.0.0-preview)
-3. Before you begin, ensure that the required [Azure RBAC roles](../azure-sql/database/long-term-backup-retention-configure.md#prerequisites) are granted at either **subscription** or **resource group** scope. Note: To access LTR backups that belong to a dropped server, the permission must be granted at the subscription scope of that server.
--
-### Limitations
-
-- Failover groups are not supported. This means that customers migrating Azure Germany databases will need to manage connection strings themselves during failover.
-- No support for Azure portal, Azure Resource Manager APIs, PowerShell, or CLI. This means that each Azure Germany migration will need to manage active geo-replication setup and failover through T-SQL.
-- Customers cannot create multiple geo-secondaries in global Azure for databases in Azure Germany.
-- Creation of a geo-secondary must be initiated from the Azure Germany region.
-- Customers can migrate databases out of Azure Germany only to global Azure. Currently no other cross-cloud migration is supported.
-- Azure AD users in Azure Germany user databases are migrated but are not available in the new Azure AD tenant where the migrated database resides. To enable these users, they must be manually dropped and recreated using the current Azure AD users available in the new Azure AD tenant where the newly migrated database resides.
-
-### Copy long-term retention backups using PowerShell
-
-A new PowerShell command **Copy-AzSqlDatabaseLongTermRetentionBackup** has been introduced, which can be used to copy the long-term retention backups from Azure Germany to Azure global regions.
-
-1. **Copy LTR backup using backup name**
-The following example shows how to copy an LTR backup from Azure Germany to a global Azure region by using the backup name.
-
-```powershell
-# Source database and target database info
-$location = "<location>"
-$sourceRGName = "<source resourcegroup name>"
-$sourceServerName = "<source server name>"
-$sourceDatabaseName = "<source database name>"
-$backupName = "<backup name>"
-$targetDatabaseName = "<target database name>"
-$targetSubscriptionId = "<target subscriptionID>"
-$targetRGName = "<target resource group name>"
-$targetServerFQDN = "<targetservername.database.windows.net>"
-
-Copy-AzSqlDatabaseLongTermRetentionBackup `
- -Location $location `
- -ResourceGroupName $sourceRGName `
- -ServerName $sourceServerName `
- -DatabaseName $sourceDatabaseName `
- -BackupName $backupName `
- -TargetDatabaseName $targetDatabaseName `
- -TargetSubscriptionId $targetSubscriptionId `
- -TargetResourceGroupName $targetRGName `
- -TargetServerFullyQualifiedDomainName $targetServerFQDN
-```
-
-2. **Copy LTR backup using backup resourceID**
-The following example shows how to copy an LTR backup from Azure Germany to a global Azure region by using a backup resource ID. You can also use this example to copy backups of a deleted database.
-
-```powershell
-```powershell
-$location = "<location>"
-# list LTR backups for All databases (you have option to choose All/Live/Deleted)
-$ltrBackups = Get-AzSqlDatabaseLongTermRetentionBackup -Location $location -DatabaseState All
-
-# select the LTR backup you want to copy
-$ltrBackup = $ltrBackups[0]
-$resourceID = $ltrBackup.ResourceId
-
-# Source Database and target database info
-$targetDatabaseName = "<target database name>"
-$targetSubscriptionId = "<target subscriptionID>"
-$targetRGName = "<target resource group name>"
-$targetServerFQDN = "<targetservername.database.windows.net>"
-
-Copy-AzSqlDatabaseLongTermRetentionBackup `
- -ResourceId $resourceID `
- -TargetDatabaseName $targetDatabaseName `
- -TargetSubscriptionId $targetSubscriptionId `
- -TargetResourceGroupName $targetRGName `
- -TargetServerFullyQualifiedDomainName $targetServerFQDN
-```
--
-### Limitations
-
-- [Point-in-time restore (PITR)](../azure-sql/database/recovery-using-backups.md#point-in-time-restore) backups are only taken on the primary database; this is by design. When migrating databases from Azure Germany using Geo-DR, PITR backups will start happening on the new primary after failover. However, the existing PITR backups (on the previous primary in Azure Germany) will not be migrated. If you need PITR backups to support any point-in-time restore scenarios, you need to restore the database from PITR backups in Azure Germany and then migrate the recovered database to global Azure.
-- Long-term retention policies are not migrated with the database. If you have a [long-term retention (LTR)](../azure-sql/database/long-term-retention-overview.md) policy on your database in Azure Germany, you need to manually copy and recreate the LTR policy on the new database after migrating.
-
-### Requesting access
-
-To migrate a database from Azure Germany to global Azure using geo-replication, your subscription *in Azure Germany* needs to be enabled to successfully configure the cross-cloud migration.
-
-To enable your Azure Germany subscription, you must use the following link to create a migration support request:
-
-1. Browse to the following [migration support request](https://portal.microsoftazure.de/#create/Microsoft.Support/Parameters/%7B%0D%0A++++%22pesId%22%3A+%22f3dc5421-79ef-1efa-41a5-42bf3cbb52c6%22%2C%0D%0A++++%22supportTopicId%22%3A+%229fc72ed5-805f-3894-eb2b-b1f1f6557d2d%22%2C%0D%0A++++%22contextInfo%22%3A+%22Migration+from+cloud+Germany+to+Azure+global+cloud+%28Azure+SQL+Database%29%22%2C%0D%0A++++%22caller%22%3A+%22NoSupportPlanCloudGermanyMigration%22%2C%0D%0A++++%22severity%22%3A+%223%22%0D%0A%7D).
-
-2. On the Basics tab, enter *Geo-DR migration* as the **Summary**, and then select **Next: Solutions**
-
- :::image type="content" source="media/germany-migration-databases/support-request-basics.png" alt-text="new support request form":::
-
-3. Review the **Recommended Steps**, then select **Next: Details**.
-
- :::image type="content" source="media/germany-migration-databases/support-request-solutions.png" alt-text="required support request information":::
-
-4. On the details page, provide the following:
-
- 1. In the Description box, enter the global Azure subscription ID to migrate to. To migrate databases to more than one subscription, add a list of the global Azure IDs you want to migrate databases to.
- 1. Provide contact information: name, company name, email or phone number.
- 1. Complete the form, then select **Next: Review + create**.
-
- :::image type="content" source="media/germany-migration-databases/support-request-details.png" alt-text="support request details":::
--
-5. Review the support request, then select **Create**.
--
-You'll be contacted once the request is processed.
---
-## Azure Cosmos DB
-
-You can use Azure Cosmos DB Data Migration Tool to migrate data to Azure Cosmos DB. Azure Cosmos DB Data Migration Tool is an open-source solution that imports data to Azure Cosmos DB from different sources including: JSON files, MongoDB, SQL Server, CSV files, Azure Table storage, Amazon DynamoDB, HBase, and Azure Cosmos containers.
--
-Azure Cosmos DB Data Migration Tool is available as a graphical interface tool or as a command-line tool. The source code is available in the [Azure Cosmos DB Data Migration Tool](https://github.com/azure/azure-documentdb-datamigrationtool) GitHub repository. A [compiled version of the tool](https://www.microsoft.com/download/details.aspx?id=46436) is available in the Microsoft Download Center.
-
-To migrate Azure Cosmos DB resources, we recommend that you complete the following steps:
-
-1. Review application uptime requirements and account configurations to determine the best action plan.
-1. Clone the account configurations from Azure Germany to the new region by running the data migration tool.
-1. If using a maintenance window is possible, copy data from the source to the destination by running the data migration tool.
-1. If using a maintenance window isn't an option, copy data from the source to the destination by running the tool, and then complete these steps:
- 1. Use a config-driven approach to make changes to read/write in an application.
- 1. Complete a first-time sync.
- 1. Set up an incremental sync and catch up with the change feed.
- 1. Point reads to the new account and validate the application.
- 1. Stop writes to the old account, validate that the change feed is caught up, and then point writes to the new account.
- 1. Stop the tool and delete the old account.
-1. Run the tool to validate that data is consistent across old and new accounts.
-
-For more information:
-
-- To learn how to use the Data migration tool, see [Tutorial: Use Data migration tool to migrate your data to Azure Cosmos DB](../cosmos-db/import-data.md).
-- To learn about Cosmos DB, see [Welcome to Azure Cosmos DB](../cosmos-db/introduction.md).
-
-## Azure Cache for Redis
-
-You have a few options if you want to migrate an Azure Cache for Redis instance from Azure Germany to global Azure. The option you choose depends on your requirements.
-
-### Option 1: Accept data loss, create a new instance
-
-This approach makes the most sense when both of the following conditions are true:
-
-- You're using Azure Cache for Redis as a transient data cache.
-- Your application will repopulate the cache data automatically in the new region.
-
-To migrate with data loss and create a new instance:
-
-1. Create a new Azure Cache for Redis instance in the new target region.
-1. Update your application to use the new instance in the new region.
-1. Delete the old Azure Cache for Redis instance in the source region.
-
-### Option 2: Copy data from the source instance to the target instance
-
-A member of the Azure Cache for Redis team wrote an open-source tool that copies data from one Azure Cache for Redis instance to another without requiring import or export functionality. See step 4 in the following steps for information about the tool.
-
-To copy data from the source instance to the target instance:
-
-1. Create a VM in the source region. If your dataset in Azure Cache for Redis is large, make sure that you select a relatively powerful VM size to minimize copying time.
-1. Create a new Azure Cache for Redis instance in the target region.
-1. Flush data from the **target** instance. (Make sure *not* to flush from the **source** instance. Flushing is required because the copy tool *doesn't overwrite* existing keys in the target location.)
-1. Use the following tool to automatically copy data from the source Azure Cache for Redis instance to the target Azure Cache for Redis instance: [Tool source](https://github.com/deepakverma/redis-copy) and [tool download](https://github.com/deepakverma/redis-copy/releases/download/alpha/Release.zip).
-
-> [!NOTE]
-> This process can take a long time depending on the size of your dataset.
-
-### Option 3: Export from the source instance, import to the destination instance
-
-This approach takes advantage of features that are available only in the Premium tier.
-
-To export from the source instance and import to the destination instance:
-
-1. Create a new Premium tier Azure Cache for Redis instance in the target region. Use the same size as the source Azure Cache for Redis instance.
-1. [Export data from the source cache](../azure-cache-for-redis/cache-how-to-import-export-data.md) or use the [Export-AzRedisCache PowerShell cmdlet](/powershell/module/az.rediscache/export-azrediscache) (see the sketch after these steps).
-
- > [!NOTE]
- > The export Azure Storage account must be in the same region as the cache instance.
-
-1. Copy the exported blobs to a storage account in the destination region (for example, by using AzCopy).
-1. [Import data to the destination cache](../azure-cache-for-redis/cache-how-to-import-export-data.md) or use the [Import-AzRedisCAche PowerShell cmdlet](/powershell/module/az.rediscache/import-azrediscache).
-1. Reconfigure your application to use the target Azure Cache for Redis instance.
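-
-Steps 2 and 4 can also be scripted. The following is a minimal sketch, assuming the Az.RedisCache module; the cache names, container SAS URLs, and blob prefix are placeholders:
-
-```powershell
-# Step 2: export the source cache (Premium tier) to a blob container in the same region.
-Export-AzRedisCache -ResourceGroupName "source-rg" -Name "germanycache" `
-    -Prefix "redisbackup" -Container "https://germanyaccount.blob.core.cloudapi.de/redis?<sas>"
-
-# Step 4: import the copied blobs into the target cache in global Azure.
-Import-AzRedisCache -ResourceGroupName "target-rg" -Name "globalcache" `
-    -Files @("https://globalaccount.blob.core.windows.net/redis/redisbackup?<sas>") -Force
-```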
-
-### Option 4: Write data to two Azure Cache for Redis instances, read from one instance
-
-For this approach, you must modify your application. The application needs to write data to more than one cache instance while reading from one of the cache instances. This approach makes sense if the data stored in Azure Cache for Redis meets the following criteria:
-- The data is refreshed regularly.
-- All data is written to the target Azure Cache for Redis instance.
-- You have enough time for all data to be refreshed.
-
-For more information:
-
-- Review the [overview of Azure Cache for Redis](../azure-cache-for-redis/cache-overview.md).
-
-## PostgreSQL and MySQL
-
-For more information, see the articles in the "Back up and migrate data" section of [PostgreSQL](../postgresql/index.yml) and [MySQL](../mysql/index.yml).
-
-![PostgreSQL and MySQL](./media/germany-migration-main/databases.png)
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Compute](./germany-migration-compute.md)
-- [Networking](./germany-migration-networking.md)
-- [Storage](./germany-migration-storage.md)
-- [Web](./germany-migration-web.md)
-- [Analytics](./germany-migration-analytics.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Security](./germany-migration-security.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Migration Identity https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-identity.md
- Title: Migrate Azure identity resources, Azure Germany to global Azure
-description: This article provides information about migrating your Azure identity resources from Azure Germany to global Azure.
 Previously updated : 10/16/2020
-# Migrate identity resources to global Azure
--
-This article has information that can help you migrate Azure identity resources from Azure Germany to global Azure.
-
-The guidance on identities and tenants is intended for Azure-only customers. If you use common Azure Active Directory (Azure AD) tenants for Azure and Microsoft 365 (or other Microsoft products), identity migration is more complex, and you should contact your account manager before using this migration guidance.
-
-## Azure Active Directory
-
-Azure AD in Azure Germany is separate from Azure AD in global Azure. Currently, you can't move Azure AD users from Azure Germany to global Azure.
-
-Default tenant names in Azure Germany and global Azure are always different because Azure automatically appends a suffix based on the environment. For example, a user name for a member of the **contoso** tenant in global Azure is **user1\@contoso.microsoftazure.com**. In Azure Germany, it's **user1\@contoso.microsoftazure.de**.
-
-When you use custom domain names (like **contoso.com**) in Azure AD, you must register the domain name in Azure. Custom domain names can be defined in *only one* cloud environment at a time. The domain validation fails when the domain is already registered in *any* instance of Azure Active Directory. For example, the user **user1\@contoso.com** that exists in Azure Germany can't also exist in global Azure under the same name at the same time. The registration for **contoso.com** would fail.
-
-A "soft" migration in which some users are already in the new environment and some users are still in the old environment requires different sign-in names for the different cloud environments.
-
-We don't cover each possible migration scenario in this article. A recommendation depends, for example, on how you provision users, what options you have for using different user names or UserPrincipalNames, and other dependencies. But, we've compiled some hints to help you inventory users and groups in your current environment.
-
-To get a list of all cmdlets related to Azure AD, run:
-
-```powershell
-Get-Help Get-AzureAD*
-```
-
-### Inventory users
-
-To get an overview of all users and groups that exist in your Azure AD instance:
-
-```powershell
-Get-AzureADUser -All $true
-```
-
-To list only enabled accounts, add the following filter:
-
-```powershell
-Get-AzureADUser -All $true | Where-Object {$_.AccountEnabled -eq $true}
-```
-
-To make a full dump of all attributes, in case you forget something:
-
-```powershell
-Get-AzureADUser -All $true | Where-Object {$_.AccountEnabled -eq $true} | Format-List *
-```
-
-To select the attributes that you need to re-create the users:
-
-```powershell
-Get-AzureADUser -All $true | Where-Object {$_.AccountEnabled -eq $true} | select UserPrincipalName,DisplayName,GivenName,Surname
-```
-
-To export the list to Excel, use the **Export-Csv** cmdlet at the end of this list. A complete export might look like this example:
-
-```powershell
-Get-AzureADUser -All $true | Where-Object {$_.AccountEnabled -eq $true} | select UserPrincipalName,DisplayName,GivenName,Surname | Export-Csv -Path c:\temp\alluserUTF8.csv -Delimiter ";" -Encoding UTF8
-```
-
-> [!NOTE]
-> You can't migrate passwords. Instead, you must assign new passwords or use a self-service mechanism, depending on your scenario.
->
->Also, depending on your environment, you might need to collect other information, for example, values for **Extensions**, **DirectReport**, or **LicenseDetail**.
-
-Format your CSV file as needed. Then, follow the steps described in [Import data from CSV](/powershell/azure/active-directory/importing-data) to re-create the users in your new environment.
-
-### Inventory groups
-
-To document group membership:
-
-```powershell
-Get-AzureADGroup
-```
-
-To get the list of members for each group:
-
-```powershell
-Get-AzureADGroup | ForEach-Object {$_.DisplayName; Get-AzureADGroupMember -ObjectId $_.ObjectId}
-```
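-
-To capture group membership in a reusable form, a minimal sketch (the output path is an assumption) might look like this:
-
-```powershell
-# Sketch only: export every group's members to a CSV for later re-creation.
-Get-AzureADGroup -All $true | ForEach-Object {
-    $group = $_
-    Get-AzureADGroupMember -ObjectId $group.ObjectId -All $true |
-        Select-Object @{n='GroupName';e={$group.DisplayName}}, DisplayName, UserPrincipalName
-} | Export-Csv -Path C:\temp\groupmembers.csv -Delimiter ";" -Encoding UTF8
-```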
-
-### Inventory service principals and applications
-
-Although you must re-create all service principals and applications, it's a good practice to document their current status. You can use the following cmdlets to get an extensive list of all service principals and applications:
-
-```powershell
-Get-AzureADServicePrincipal |Format-List *
-```
-
-```powershell
-Get-AzureADApplication |Format-List *
-```
-
-You can get more information by using other cmdlets that start with `Get-AzureADServicePrincipal*` or `Get-AzureADApplication*`.
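-
-If a lightweight inventory is enough, a sketch like the following (the output paths are assumptions) captures the most commonly needed properties:
-
-```powershell
-# Sketch only: export key application and service principal properties to CSV.
-Get-AzureADApplication -All $true |
-    Select-Object DisplayName, AppId, ObjectId |
-    Export-Csv -Path C:\temp\applications.csv -Delimiter ";" -Encoding UTF8
-
-Get-AzureADServicePrincipal -All $true |
-    Select-Object DisplayName, AppId, ObjectId, ServicePrincipalType |
-    Export-Csv -Path C:\temp\serviceprincipals.csv -Delimiter ";" -Encoding UTF8
-```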
-
-### Inventory directory roles
-
-To document the current role assignment:
-
-```powershell
-Get-AzureADDirectoryRole
-```
-
-Walk through each role to find users or applications that are associated with the role:
-
-```powershell
-Get-AzureADDirectoryRole | ForEach-Object {$_.DisplayName; Get-AzureADDirectoryRoleMember -ObjectId $_.ObjectId | Format-Table}
-```
-For more information:
--- Learn about [hybrid identity solutions](../active-directory/hybrid/whatis-hybrid-identity.md).-- Read the blog post [Use Azure AD Connect with multiple clouds](/archive/blogs/ralfwi/using-adconnect-with-multiple-clouds) to learn about ways you can sync to different cloud environments.-- Learn more about [Azure Active Directory](../active-directory/index.yml).-- Read about [custom domain names](../active-directory/fundamentals/add-custom-domain.md).-- Learn how to [import data from CSV to Azure AD](/powershell/azure/active-directory/importing-data).-
-## Azure AD Connect
-
-Azure AD Connect is a tool that syncs your identity data between an on-premises Active Directory instance and Azure Active Directory (Azure AD). The current version of Azure AD Connect works both for Azure Germany and global Azure. Azure AD Connect can sync to only one Azure AD instance at a time. If you want to sync to Azure Germany and global Azure at the same time, consider these options:
--- Use an additional server for a second instance of Azure AD Connect. You can't have multiple instances of Azure AD Connect on the same server.-- Define a new sign-in name for your users. The domain part (after **\@**) of the sign-in name must be different in each environment.-- Define a clear "source of truth" when you also sync backward (from Azure AD to on-premises Active Directory).-
-If you already use Azure AD Connect to sync to and from Azure Germany, make sure that you migrate any manually created users. The following PowerShell cmdlet lists all users that aren't synced by using Azure AD Connect:
-
-```powershell
-Get-AzureADUser -All $true |Where-Object {$_.DirSyncEnabled -ne "True"}
-```
-
-For more information:
--- Learn more about [Azure AD Connect](../active-directory/hybrid/reference-connect-dirsync-deprecated.md).-
-## Multi-Factor Authentication
-
-You must re-create users and redefine your Azure AD Multi-Factor Authentication instance in your new environment.
-
-To get a list of user accounts for which multi-factor authentication is enabled or enforced:
-
-1. Sign in to the Azure portal.
-1. Select **Users** > **All Users** > **Multi-Factor Authentication**.
-1. When you're redirected to the multi-factor authentication service page, set the appropriate filters to get a list of users.
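-
-If you prefer a scripted inventory, one possible approach is the older MSOnline PowerShell module, which exposes the per-user MFA state. This is an assumption, not part of the portal procedure above; verify that the module works against your tenant before relying on it.
-
-```powershell
-# Sketch only: list users that have per-user MFA enabled or enforced.
-# For Azure Germany, you may need: Connect-MsolService -AzureEnvironment AzureGermanyCloud
-Connect-MsolService
-Get-MsolUser -All |
-    Where-Object { $_.StrongAuthenticationRequirements.Count -gt 0 } |
-    Select-Object UserPrincipalName,
-        @{n='MfaState';e={$_.StrongAuthenticationRequirements[0].State}}
-```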
-
-For more information:
--- Learn more about [Azure AD Multi-Factor Authentication](../active-directory/authentication/howto-mfa-getstarted.md).-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
--- [Compute](./germany-migration-compute.md)-- [Networking](./germany-migration-networking.md)-- [Storage](./germany-migration-storage.md)-- [Web](./germany-migration-web.md)-- [Databases](./germany-migration-databases.md)-- [Analytics](./germany-migration-analytics.md)-- [IoT](./germany-migration-iot.md)-- [Integration](./germany-migration-integration.md)-- [Security](./germany-migration-security.md)-- [Management tools](./germany-migration-management-tools.md)-- [Media](./germany-migration-media.md)
germany Germany Migration Integration https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-integration.md
- Title: Migrate Azure integration resource, Azure Germany to global Azure
-description: This article provides information about migrating your Azure integration resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate integration resources to global Azure
--
-This article has information that can help you migrate Azure integration resources from Azure Germany to global Azure.
-
-## Service Bus
-
-Azure Service Bus services don't have data export or import capabilities. To migrate Service Bus resources from Azure Germany to global Azure, you can export the resources [as an Azure Resource Manager template](../azure-resource-manager/templates/export-template-portal.md). Then, adapt the exported template for global Azure and re-create the resources.
-
-> [!NOTE]
-> Exporting a Resource Manager template doesn't copy the data (for example, messages). Exporting a template only re-creates the metadata.
-
-> [!IMPORTANT]
-> Change location, Azure Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
--
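-As an illustration, you can perform this export from PowerShell; a minimal sketch, assuming you're signed in to the Azure Germany environment and that the resource group name is a placeholder:
-
-```powershell
-# Sketch only: export the resource group that contains the Service Bus namespace as a template.
-Connect-AzAccount -Environment AzureGermanCloud
-Export-AzResourceGroup -ResourceGroupName "myServiceBusRG" -Path .\servicebus-template.json
-```
-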
-### Service Bus metadata
-
-The following Service Bus metadata elements are re-created when you export a Resource Manager template:
--- Namespaces-- Queues-- Topics-- Subscriptions-- Rules-- Authorization rules-
-### Keys
-
-The preceding steps to export and re-create don't copy the shared access signature keys that are associated with authorization rules. If you need to preserve the shared access signature keys, use the `New-AzServiceBusKey` cmdlet with the optional parameter `-KeyValue` to accept the key as a string. The updated cmdlet is available in the [Azure PowerShell Az module](/powershell/azure/install-az-ps).
-
-### Usage example
-
-```powershell
-New-AzServiceBuskey -ResourceGroupName <resourcegroupname> -Namespace <namespace> -Name <name of Authorization rule> -RegenerateKey <PrimaryKey/SecondaryKey> -KeyValue <string - key value>
-```
-
-```powershell
-New-AzServiceBuskey -ResourceGroupName <resourcegroupname> -Namespace <namespace> -Queue <queuename> -Name <name of Authorization rule> -RegenerateKey <PrimaryKey/SecondaryKey> -KeyValue <string - key value>
-```
-
-```powershell
-New-AzServiceBuskey -ResourceGroupName <resourcegroupname> -Namespace <namespace> -Topic <topicname> -Name <name of Authorization rule> -RegenerateKey <PrimaryKey/SecondaryKey> -KeyValue <string - key value>
-```
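-
-If you want to carry an existing key over rather than typing it in, one possible pattern (a sketch only; resource names are placeholders) is to read the key from the source namespace and pass it to `New-AzServiceBusKey` in the target namespace:
-
-```powershell
-# Sketch only: copy the primary key of an authorization rule from the source to the target namespace.
-Connect-AzAccount -Environment AzureGermanCloud        # source namespace is in Azure Germany
-$srcKeys = Get-AzServiceBusKey -ResourceGroupName "srcRG" -Namespace "srcNamespace" -Name "RootManageSharedAccessKey"
-
-Connect-AzAccount                                      # switch to global Azure
-New-AzServiceBusKey -ResourceGroupName "trgRG" -Namespace "trgNamespace" -Name "RootManageSharedAccessKey" `
-    -RegenerateKey PrimaryKey -KeyValue $srcKeys.PrimaryKey
-```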
-
-> [!NOTE]
-> You must update your applications to use a new connection string even if you preserve the keys. DNS host names are different in Azure Germany and global Azure.
-
-### Sample connection strings
-
-**Azure Germany**
-
-```cmd
-Endpoint=sb://myBFProdnamespaceName.servicebus.cloudapi.de/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=XXXXXXXXXXXXx=
-```
-
-**Global Azure**
-
-```cmd
-Endpoint=sb://myProdnamespaceName.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=XXXXXXXXXXXXx=
-```
-
-For more information:
--- Refresh your knowledge by completing the [Service Bus tutorials](../service-bus-messaging/index.yml).-- Become familiar with how to [export Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md) or read the overview of [Azure Resource Manager](../azure-resource-manager/management/overview.md).-- Review the [Service Bus overview](../service-bus-messaging/service-bus-messaging-overview.md).-
-## Azure Logic Apps
-
-Azure Logic Apps isn't available in Azure Germany, but you can create scheduling jobs by using Azure Logic Apps in global Azure instead. Azure Scheduler, which was previously available in Azure Germany, was fully retired on January 31, 2022.
-
-For more information:
--- Learn more by completing the [Azure Logic Apps tutorials](../logic-apps/tutorial-build-schedule-recurring-logic-app-workflow.md).-- Review the [Azure Logic Apps overview](../logic-apps/logic-apps-overview.md).-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
--- [Compute](./germany-migration-compute.md)-- [Networking](./germany-migration-networking.md)-- [Storage](./germany-migration-storage.md)-- [Web](./germany-migration-web.md)-- [Databases](./germany-migration-databases.md)-- [Analytics](./germany-migration-analytics.md)-- [IoT](./germany-migration-iot.md)-- [Identity](./germany-migration-identity.md)-- [Security](./germany-migration-security.md)-- [Management tools](./germany-migration-management-tools.md)-- [Media](./germany-migration-media.md)
germany Germany Migration Iot https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-iot.md
- Title: Migrate Azure IoT resources from Azure Germany to global Azure
-description: This article provides information about migrating your Azure IoT resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate IoT resources to global Azure
--
-This article has information that can help you migrate Azure IoT resources from Azure Germany to global Azure.
-
-## Azure Cosmos DB
-
-You can use Azure Cosmos DB Data Migration Tool to migrate data to Azure Cosmos DB. Azure Cosmos DB Data Migration Tool is an open-source solution that imports data to Azure Cosmos DB from different sources.
-
-Azure Cosmos DB Data Migration Tool is available as a graphical tool or as a command-line tool. The source code is available in the [Azure Cosmos DB Data Migration Tool](https://github.com/azure/azure-documentdb-datamigrationtool) GitHub repository. A [compiled version of the tool](https://www.microsoft.com/download/details.aspx?id=46436) is available in the Microsoft Download Center.
-
-To migrate Azure Cosmos DB resources, we recommend that you complete the following steps:
-
-1. Review application uptime requirements and account configurations to determine the best action plan.
-1. Clone the account configurations from Azure Germany to the new region by running the data migration tool.
-1. If using a maintenance window is possible, copy data from the source to the destination by running the data migration tool.
-1. If using a maintenance window isn't an option, copy data from the source to the destination by running the tool, and then complete these steps:
- 1. Use a config-driven approach to make changes to read/write in an application.
- 1. Complete a first-time sync.
- 1. Set up an incremental sync and catch up with the change feed.
- 1. Point reads to the new account and validate the application.
- 1. Stop writes to the old account, validate that the change feed is caught up, and then point writes to the new account.
- 1. Stop the tool and delete the old account.
-1. Run the tool to validate that data is consistent across old and new accounts.
-
-For more information:
--- Read an [introduction to Azure Cosmos DB](../cosmos-db/introduction.md).-- Learn how to [import data to Azure Cosmos DB](../cosmos-db/import-data.md).-
-## Functions
-
-Migrating Azure Functions resources from Azure Germany to global Azure isn't supported at this time. We recommend that you export a Resource Manager template, change the location, and then redeploy to the target region.
-
-> [!IMPORTANT]
-> Change location, Azure Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
-
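-As an illustration, after you export and edit the template, redeploying it to the target region might look like the following sketch (the file name, resource group, and location are placeholders):
-
-```powershell
-# Sketch only: deploy the edited template into a resource group in global Azure.
-New-AzResourceGroup -Name "myFunctionsRG" -Location "westeurope"
-New-AzResourceGroupDeployment -ResourceGroupName "myFunctionsRG" -TemplateFile .\template.json
-```
-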
-For more information:
--- Refresh your knowledge by completing the [Functions tutorials](../azure-functions/index.yml).-- Learn how to [export Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md) or read the overview of [Azure Resource Manager](../azure-resource-manager/management/overview.md).-- Review the [Azure Functions overview](../azure-functions/functions-overview.md).-- Read an [overview of Azure locations](https://azure.microsoft.com/global-infrastructure/locations/).-- Learn how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).-
-## Notification Hubs
-
-To migrate settings from one instance of Azure Notification Hubs to another instance, export and then import all registration tokens and tags:
-
-1. [Export the existing notification hub registrations](/previous-versions/azure/azure-services/dn790624(v=azure.100)) to an Azure Blob storage container.
-1. Create a new notification hub in the target environment.
-1. [Import your registration tokens](/previous-versions/azure/azure-services/dn790624(v=azure.100)) from Blob storage to your new notification hub.
-
-For more information:
--- Refresh your knowledge by completing the [Notification Hubs tutorials](../notification-hubs/notification-hubs-android-push-notification-google-fcm-get-started.md).-- Review the [Notification Hubs overview](../notification-hubs/notification-hubs-push-notification-overview.md).-
-## IoT Hub
-
-Although you can migrate Azure IoT Hub instances from Azure Germany to global Azure, the migration isn't seamless.
-
-> [!NOTE]
-> This migration might cause downtime and data loss in your Azure IoT application. Telemetry messages, cloud-to-device (C2D) commands, and job-related information (schedules and history) aren't migrated. You must reconfigure your devices and back-end applications to start using the new connection strings.
-
-### Step 1: Re-create the IoT hub
-
-IoT Hub doesn't support cloning natively. However, you can use the Azure Resource Manager feature to [export a resource group as a template](../azure-resource-manager/templates/export-template-portal.md) to export your IoT Hub metadata. Configured routes and other IoT hub settings are included in the exported metadata. Then, redeploy the template in global Azure. You might find it easier to re-create the IoT hub in the Azure portal by looking at the details in the exported JSON.
-
-### Step 2: Migrate device identities
-
-To migrate device identities:
-
-1. In the source tenant in Azure Germany, use the [ExportDevices](../iot-hub/iot-hub-bulk-identity-mgmt.md) Resource Manager API to export all device identities, device twins, and module twins (including the keys) to a storage container. You can use a storage container in Azure Germany or global Azure. Make sure that the generated shared access signature URI has sufficient permissions.
-1. Run the [ImportDevices](../iot-hub/iot-hub-bulk-identity-mgmt.md) Resource Manager API to import all device identities from the storage container to the cloned IoT hub in global Azure.
-1. Reconfigure your devices and back-end services to start using the new connection strings. The host name changes from **\*.azure-devices.de** to **\*.azure-devices.com**.
-
-> [!NOTE]
-> The root certificate authority is different in Azure Germany and global Azure. Account for this when you reconfigure your devices and back-end applications that interact with the IoT Hub instance.
-
-For more information:
--- Learn how to [export IoT Hub bulk identities](../iot-hub/iot-hub-bulk-identity-mgmt.md#export-devices).-- Learn how to [import IoT Hub bulk identities](../iot-hub/iot-hub-bulk-identity-mgmt.md#import-devices).-- Review the [Azure IoT Hub overview](../iot-hub/about-iot-hub.md).-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
--- [Compute](./germany-migration-compute.md)-- [Networking](./germany-migration-networking.md)-- [Storage](./germany-migration-storage.md)-- [Web](./germany-migration-web.md)-- [Databases](./germany-migration-databases.md)-- [Analytics](./germany-migration-analytics.md)-- [Integration](./germany-migration-integration.md)-- [Identity](./germany-migration-identity.md)-- [Security](./germany-migration-security.md)-- [Management tools](./germany-migration-management-tools.md)-- [Media](./germany-migration-media.md)
germany Germany Migration Main https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-main.md
- Title: Migrate from Azure Germany to global Azure
-description: An introduction to migrating your Azure resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Overview of migration guidance for Azure Germany
--
-The articles in this section were created to help you migrate your workloads from Azure Germany to global Azure. Although the [Azure migration center](https://azure.microsoft.com/migration/) provides tools that help you migrate resources, some of the tools in the Azure migration center are useful only for migrations that occur in the same tenant or in the same region.
-
-The two regions in Germany are entirely separate from global Azure. The clouds in global Azure and in Germany have their own, separate Azure Active Directory (Azure AD) instances. Because of this, Azure Germany tenants are separate from global Azure tenants. This article describes the migration tools that are available when you migrate between *different* tenants.
-
-The guidance on identity / tenants is intended for Azure-only customers. If you use common Azure Active Directory (Azure AD) tenants for Azure and Microsoft 365 (or other Microsoft products), there are complexities in identity migration and you should first read the [Migration phases actions and impacts for the Migration from Microsoft Cloud Deutschland](/microsoft-365/enterprise/ms-cloud-germany-transition-phases). If you have questions, contact your account manager or Microsoft support.
-
-Azure Cloud Solution Providers need to take additional steps to support customers during and after the transition to the new German datacenter region. Learn more about the [additional steps](/microsoft-365/enterprise/ms-cloud-germany-transition-add-csp).
-
-## Migration process
-
-The process that you use to migrate a workload from Azure Germany to global Azure typically is similar to the process that's used to migrate applications to the cloud. The steps in the migration process are:
-
-![Image that shows the four steps in the migration process: Assess, Plan, Migrate, Validate](./media/germany-migration-main/migration-steps.png)
-
-### Assess
-
-It's important to understand your organization's Azure Germany footprint by bringing together Azure account owners, subscription admins, tenant admins, and finance and accounting teams. The people who work in these roles can provide a complete picture of Azure usage for a large organization.
-
-In the assessment stage, compile an inventory of resources:
- - Each subscription admin and tenant admin should run a series of scripts to list resource groups, the resources in each resource group, and resource group deployment settings.
- - Document dependencies across applications in Azure and with external systems.
- - Document the count of each Azure resource and the amount of data that's associated with each instance you want to migrate.
- - Ensure that application architecture documents are consistent with the Azure resources list.
-
-At the end of this stage, you have:
--- A complete list of Azure resources that are in use.-- A list of dependencies between resources.-- Information about the complexity of the migration effort.-
-### Plan
-
-In the planning stage, complete the following tasks:
--- Use the output of the dependency analysis completed in the assessment stage to define related components. Consider migrating related components together in a *migration package*.-- (Optional) Use the migration as an opportunity to apply [Gartner 5-R criteria](https://www.gartner.com/en/documents/3873016/evaluation-criteria-for-cloud-management-platforms-and-t) and to optimize your workload.-- Determine the target environment in global Azure:
- 1. Identify the target global Azure tenant (create one, if necessary).
- 1. Create subscriptions.
- 1. Choose the global Azure region that you want to migrate to.
- 1. Execute test migration scenarios that match your architecture in Azure Germany with the architecture in global Azure.
-- Determine the appropriate timeline and schedule for migration. Create a user acceptance test plan for each migration package.-
-### Migrate
-
-In the migration phase, use the tools, techniques, and recommendations that are discussed in the Azure Germany migration articles to create new resources in global Azure. Then, configure applications.
-
-### Validate
-
-In the validation stage, complete the following tasks:
-
-1. Perform user acceptance testing.
-1. Ensure that applications are working as expected.
-1. Sync the latest data to the target environment, if applicable.
-1. Switch to a new application instance in global Azure.
-1. Verify that the production environment is working as expected.
-1. Decommission resources in Azure Germany.
-
-## Terms
-
-These terms are used in the Azure Germany migration articles:
-
-**Source** describes where you are migrating resources from (for example, Azure Germany):
--- **Source tenant name**: The name of the tenant in Azure Germany (everything after **\@** in the account name). Tenant names in Azure Germany all end in **microsoftazure.de**.-- **Source tenant ID**: The ID of the tenant in Azure Germany. The tenant ID appears in the Azure portal when you move the mouse over the account name in the upper-right corner.-- **Source subscription ID**: The ID of the resource subscription in Azure Germany. You can have more than one subscription in the same tenant. Always make sure that you're using the correct subscription.-- **Source region**: Either Germany Central (**germanycentral**) or Germany Northeast (**germanynortheast**), depending on where the resource you want to migrate is located.-
-**Target** or **destination** describes where you are migrating resources to:
--- **Target tenant name**: The name of the tenant in global Azure.-- **Target tenant ID**: The ID of the tenant in global Azure.-- **Target subscription ID**: The subscription ID for the resource in global Azure.-- **Target region**: You can use almost any region in global Azure. It's likely that you'll want to migrate your resources to West Europe (**westeurope**) or North Europe (**northeurope**).-
-> [!NOTE]
-> Verify that the Azure service you're migrating is offered in the target region. All Azure services that are available in Azure Germany are also available in West Europe. They're also available in North Europe, except for Machine Learning Studio (classic) and the G/GS VM series in Azure Virtual Machines.
-
-It's a good idea to bookmark the source and target portals in your browser:
--- The Azure Germany portal is at [https://portal.microsoftazure.de/](https://portal.microsoftazure.de/).-- The global Azure portal is at [https://portal.azure.com/](https://portal.azure.com/).-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
--- [Compute](./germany-migration-compute.md)-- [Networking](./germany-migration-networking.md)-- [Storage](./germany-migration-storage.md)-- [Web](./germany-migration-web.md)-- [Databases](./germany-migration-databases.md)-- [Analytics](./germany-migration-analytics.md)-- [IoT](./germany-migration-iot.md)-- [Integration](./germany-migration-integration.md)-- [Identity](./germany-migration-identity.md)-- [Security](./germany-migration-security.md)-- [Management tools](./germany-migration-management-tools.md)-- [Media](./germany-migration-media.md)
germany Germany Migration Management Tools https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-management-tools.md
- Title: Migrate Azure management tools from Azure Germany to global Azure
-description: This article provides information about migrating your Azure management tools from Azure Germany to global Azure.
- Previously updated : 06/22/2021------
-# Migrate management tool resources to global Azure
--
-This article has information that can help you migrate Azure management tools from Azure Germany to global Azure.
-
-## Traffic Manager
-
-Azure Traffic Manager can help you complete a smooth migration. However, you can't migrate Traffic Manager profiles that you create in Azure Germany to global Azure. (During a migration, you migrate Traffic Manager endpoints to the target environment, so you need to update the Traffic Manager profile anyway.)
-
-You can define additional endpoints in the target environment by using Traffic Manager while it's still running in the source environment. When Traffic Manager is running in the new environment, you can still define endpoints that you haven't yet migrated in the source environment. This scenario is known as the [Blue-Green scenario](https://azure.microsoft.com/blog/blue-green-deployments-using-azure-traffic-manager/). The scenario involves the following steps:
-
-1. Create a new Traffic Manager profile in global Azure.
-1. Define the endpoints in Azure Germany.
-1. Change your DNS CNAME record to the new Traffic Manager profile.
-1. Turn off the old Traffic Manager profile.
-1. Migrate and configure endpoints. For each endpoint in Azure Germany:
- 1. Migrate the endpoint to global Azure.
- 1. Change the Traffic Manager profile to use the new endpoint.
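-
-As an illustration of the first step, a minimal PowerShell sketch of creating the new profile in global Azure (the profile name, resource group, and DNS prefix are placeholders) might look like this:
-
-```azurepowershell
-# Sketch only: create a new Traffic Manager profile in global Azure.
-New-AzTrafficManagerProfile -Name "myProfile" -ResourceGroupName "myResourceGroup" `
-    -TrafficRoutingMethod Priority -RelativeDnsName "myprofile-global" -Ttl 30 `
-    -MonitorProtocol HTTPS -MonitorPort 443 -MonitorPath "/"
-```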
-
-For more information:
--- Refresh your knowledge by completing the [Traffic Manager tutorials](../traffic-manager/index.yml).-- Review the [Traffic Manager overview](../traffic-manager/traffic-manager-overview.md).-- Learn how to [create a Traffic Manager profile](../traffic-manager/quickstart-create-traffic-manager-profile.md).-- Read about the [Blue-Green scenario](https://azure.microsoft.com/blog/blue-green-deployments-using-azure-traffic-manager/).-
-## Azure Backup
-
-The Azure Backup service provides simple, secure, and cost-effective solutions to back up your data and recover it from the Microsoft Azure cloud. Backup data move is now enabled from Germany Central (GEC) and Germany Northeast (GNE) to Germany West Central (GWC) via PowerShell cmdlets.
-
-### Prerequisite for moving hybrid workloads
-
-Once the move operation starts, backups are stopped in the existing vaults. So, it's important to protect your hybrid workloads (Data Protection Manager (DPM) server, Azure Backup Server (MABS), and Microsoft Azure Recovery Services (MARS) agent) in a new vault in GWC before you start moving backup data from those regions.
-To start protecting in a new vault:
-
-1. Create a new vault (VaultN) in GWC.
-1. Re-register your DPM server/MABS/MARS agent to VaultN.
-1. Assign Policy and start taking backups.
-
-The initial backup will be a full copy followed by incremental backups.
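-
-A minimal sketch of creating the new vault (VaultN) with Azure PowerShell, assuming the Az.RecoveryServices module and an existing resource group (the names are placeholders):
-
-```azurepowershell
-# Sketch only: create the new vault in Germany West Central.
-New-AzRecoveryServicesVault -Name "VaultN" -ResourceGroupName "TestTargetRG" -Location "germanywestcentral"
-```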
-
->[!Important]
->- Before initiating the backup data move operation, ensure that the first full backup in VaultN is completed.
->- For DPM/MABS, keep the passphrase from the original vault in a secure location, because you need the same passphrase to restore data from the target vault. Without the original passphrase, restore isn't possible.
-
-### Step 1: Download the resources
-
-Download and install the required resources.
-
-1. [Download](https://github.com/PowerShell/PowerShell/releases/tag/v7.0.3) the latest version of PowerShell (PowerShell 7).
-1. Use Az.RecoveryServices module version 4.2.0 available in Azure Cloud Shell.
-1. [Update](https://aka.ms/azurebackup_agent) all MARS agents to the latest version.
-1. Validate your passphrase. If you need to regenerate, follow the [validation steps](https://support.microsoft.com/topic/mandatory-update-for-azure-backup-for-microsoft-azure-recovery-services-agent-411e371c-aace-e134-de6b-bf9fda48026e#section-3).
-
-### Step 2: Create a target vault in GWC
-
-Create a Target Vault (Vault 2) in GWC. To learn how to create a vault, see [Create and configure a Recovery Services vault](../backup/backup-create-rs-vault.md).
-
->[!Note]
->- Ensure that the vault has no protected items.
->- Ensure that the target vault has the necessary redundancy: locally redundant storage (LRS) or geo-redundant storage (GRS).
-
-### Step 3: Use PowerShell to trigger the backup data move
-
-#### Get the source vault from GNE or GEC
-
-Run these cmdlets:
-
-1. `Connect-AzAccount -Environment AzureGermanCloud`
-1. `Set-AzContext -Subscription "subscriptionName"`
-1. `$srcVault = Get-AzRecoveryServicesVault -Name "srcVault" -ResourceGroupName "TestSourceRG"`
-
->[!Note]
->- `srcVault` = Source Vault
->- `TestSourceRG` = Source Resource Group
-
-#### Get the target vault in GWC
-
-Run these cmdlets:
-
-1. `Connect-AzAccount`
-1. `Set-AzContext -Subscription "subscriptionName"`
-1. `$trgVault = Get-AzRecoveryServicesVault -Name "targetVault" -ResourceGroupName "TestTargetRG"`
-
->[!Note]
->- `targetVault` = Target Vault
->- `TestTargetRG` = Test Resource Group
-
-#### Perform validation
-
-Run these cmdlets:
-
-1. `$validated = $false`
-1. `$validated = Test-AzRecoveryServicesDSMove -SourceVault $srcVault -TargetVault $trgVault`
-
-#### Initialize/prepare DS move
-
-Run these cmdlets:
-
-1. `Connect-AzAccount -Environment AzureGermanCloud`
-1. `Set-AzContext -SubscriptionName $srcSub`
-1. ```azurepowershell
- if($validated) {
- $corr = Initialize-AzRecoveryServicesDSMove -SourceVault $srcVault -TargetVault $trgVault
- }
- ```
-1. `$corr`
-
-#### Trigger DS move
-
-Run these cmdlets:
-
-1. `Connect-AzAccount`
-1. `Set-AzContext -SubscriptionName $trgSub`
-1. `Copy-AzRecoveryServicesVault -CorrelationIdForDataMove $corr -SourceVault $srcVault -TargetVault $trgVault -Force`
-
-You can monitor the operation using the `Get-AzRecoveryServicesBackupJob` cmdlet.
-
->[!Note]
->- During the backup data move operation, all backup items are moved to a transient state. In this state, new recovery points (RPs) aren't created, and old RPs aren't cleaned up.
->- As this feature is enabled in GEC and GNE, we recommend that you first perform these steps on a small vault and validate the move. On success, perform these steps on all vaults.
->- Although the backup data move is triggered for the entire vault, the move happens per container (VMs, DPM and MABS servers, and MARS agents). Track the progress of the move per container in the **Jobs** section.
-
- ![monitor progress of move jobs.](./media/germany-migration-management-tools/track-move-jobs.png)
-
-During the move operation, the following actions are blocked on the source vault:
--- New Scheduled Backups-- Stop backup with Delete data.-- Delete Data-- Resume Backup-- Modify Policy-
-### Step 4: Check the status of the move job
-
-The backup data move operation happens per container. For Azure VM backups, each VM backup is considered a container. To indicate the progress of the backup data move operation, a job is created for every container.
-
-To monitor the jobs, run these cmdlets:
-
-1. `Get-AzRecoveryServicesBackupJob -Operation BackupDataMove -VaultId $trgVault.ID`
-1. `$Jobs = Get-AzRecoveryServicesBackupJob -Operation BackupDataMove -VaultId $trgVault.ID`
-1. `$JobDetails = Get-AzRecoveryServicesBackupJobDetail -Job $Jobs[0] -VaultId $trgVault.ID`
-1. `$JobDetails.ErrorDetails`
-
-### Step 5: Post move operations
-
-Once the backup data move operation for all containers to the target vault is complete, no further action is required for VM backups.
---
-#### Verify the movement of containers is complete
-
-To check if all containers from the source vault have moved to the target vault, go to the target vault and check for all containers in that vault.
-
-Run the following cmdlet to list all VMs moved from the source vault to target vault:
-
-```azurepowershell
-Get-AzRecoveryServicesBackupContainer -BackupManagementType "AzureVM" -VaultId $trgVault.ID
-```
-
-#### Verify the movement of policies is complete
-
-After the backup data is moved successfully to the new region, all policies that were applied to Azure VM backup items in the source vault are applied to the target vault.
-
-To verify if all policies have moved from the source vault to the target vault, go to the target vault and run the following cmdlet to get the list of all moved policies:
-
-```azurepowershell
-Get-AzRecoveryServicesBackupProtectionPolicy -VaultId $trgVault.ID
-```
-
-These policies continue to apply to your backup data after the move operation, so the lifecycle management of the moved recovery points continues.
-
-To avoid sudden clean-up of several recovery points (which may have expired during the move process or may expire immediately after it), the clean-up of older recovery points (RPs) is paused for a period of 10 days after the move. During this period, you aren't billed for the additional data retained by the old RPs.
-
->[!Important]
->If you need to recover from these older RPs, recover them within this 10-day period, immediately after the backup data move. Once this safety period is over, the policies applied to each of the backup items take effect and enforce clean-up of the old RPs.
-
-#### Restore operations
-
-**Restore Azure Virtual Machines**
-
-For Azure Virtual machines, you can restore from the recovery points in the target vault.
-
-#### Configure MARS agent
-
-1. Re-register to the target vault.
-1. Restore from the recovery points.
-1. After recovery, re-register to the new vault (VaultN) and resume backups.
-
->[!Note]
->While the MARS agent is registered to the target vault, no new backups take place.
-
-#### Configure DPM/MABS
-
-**Recommended**
-
-Use the External DPM method to perform restore. For more information, see [Recover data from Azure Backup Server](../backup/backup-azure-alternate-dpm-server.md).
-
->[!Note]
->- Original-Location Recovery (OLR) is not supported.
->- Backups will continue in VaultN for all the machines registered.
-
-**Other option**
-
-For Original-Location Recovery (OLR):
-
-1. Re-register the DPM server/MABS to the target vault.
-1. Perform restore operation.
-1. Re-register the DPM server/MABS back to the new vault.
-
->[!Note]
->Limitations of using DPM: <br><br> <ul><li>Backup operations for all the machines registered to the DPM server are stopped when you connect the DPM server to the target vault.</li><li>After the DPM server is re-registered to the new vault after restore, a consistency check takes place (the time it takes depends on the amount of data) before backups resume.</li></ul>
-
-### Error codes
-
-#### UserErrorConflictingDataMoveOnVault
-
-**Message:** There is another data move operation currently running on vault.
-
-**Scenario:** You're trying the data move operation on a source vault while another data move operation is already running on the same source vault.
-
-**Recommended action:** Wait until the current data move operation completes, and then try again.
-
-#### UserErrorOperationNotAllowedDuringDataMove
-
-**Message:** This operation is not allowed since data move operation is in progress.
-
-**Scenarios:** While a data move operation is in progress, the following operations aren't allowed in the source vault:
-
-- Stop Backup with Retain Data -- Stop Backup with delete data. -- Delete backup data. -- Resume backup -- Modify policy.-
-**Recommended action:** Wait until the data move operation completes, and then try again. [Learn more](#azure-backup) about the supported operations.
-
-#### UserErrorNoContainersFoundForDataMove
-
-**Message:** There are no containers in this vault which are supported for data move operation.
-
-**Scenarios:** This message displays if:
-- Source vault has no containers at all. -- Source vault has only unsupported containers. -- All containers in the source vault were previously moved to some target vault, and you passed IgnoreMoved = true in the API.-
-**Recommended action:** [Learn](#azure-backup) about the supported containers for data move.
-
-#### UserErrorDataMoveNotSupportedAtContainerLevel
-
-**Message:** Data move operation is not supported at container level.
-
-**Scenario:** You have chosen a container level data move operation.
-
-**Recommended action:** Try the vault level data move operation.
-
-#### UserErrorDataMoveNotAllowedContainerRegistrationInProgress
-
-**Message:** Data move operation is not allowed because a container registration operation is running in source vault.
-
-**Scenario:** A container registration operation is in progress in the source vault when you tried data move.
-
-**Recommended action:** Try the data move operation after some time.
-
-#### UserErrorDataMoveNotAllowedTargetVaultNotEmpty
-
-**Message:** Data move operation is not allowed because target vault has some containers already registered.
-
-**Scenario:** The chosen target vault has some containers already registered.
-
-**Recommended action:** Try the data move operation on an empty target vault.
-
-#### UserErrorUnsupportedSourceRegionForDataMove
-
-**Message:** Data move operation is not supported for this region.
-
-**Scenario:** Source region not valid.
-
-**Recommended action:** Check the [list of supported regions](#azure-backup) for data move.
-
-#### UserErrorUnsupportedTargetRegionForDataMove
-
-**Message:** Data move operation is not supported to this region.
-
-**Scenario:** Target region ID not valid.
-
-**Recommended action:** Check the [list of supported regions](#azure-backup) for data move.
--
-#### UserErrorDataMoveTargetVaultWithPrivateEndpointNotSupported
-
-**Message:** Data cannot be moved as selected target vault has private endpoints.
-
-**Scenario:** Private endpoints are enabled in the target vault.
-
-**Recommended action:** Delete the private endpoints and retry the move operation. [Learn more](#azure-backup) about the supported operations.
-
-#### UserErrorDataMoveSourceVaultWithPrivateEndpointNotSupported
-
-**Message:** Data cannot be moved as selected source vault has private endpoints.
-
-**Scenario:** Private endpoints are enabled in the source vault.
-
-**Recommended action:** Delete the private endpoints and retry the move operation. [Learn more](../backup/private-endpoints.md#deleting-private-endpoints) about the supported operations.
-
-#### UserErrorDataMoveSourceVaultWithCMKNotSupported
-
-**Message:** Data cannot be moved as selected source vault is encryption enabled.
-
-**Scenario:** Customer-Managed Keys (CMK) are enabled in the source vault.
-
-**Recommended action:** [Learn](#azure-backup) about the supported operations.
-
-#### UserErrorDataMoveTargetVaultWithCMKNotSupported
-
-**Message:** Data cannot be moved as selected target vault is encryption enabled.
-
-**Scenario:** Customer-Managed Keys (CMK) are enabled in the target vault.
-
-**Recommended action:** [Learn](#azure-backup) about the supported operations.
-
-## Azure Scheduler
-
-Azure Scheduler was fully retired on January 31, 2022. To create scheduling jobs, use [Azure Logic Apps](../logic-apps/logic-apps-overview.md) in global Azure instead.
-
-For more information:
--- Learn more by completing the [Azure Logic Apps tutorials](../logic-apps/tutorial-build-schedule-recurring-logic-app-workflow.md).-- Review the [Azure Logic Apps overview](../logic-apps/logic-apps-overview.md).-
-## Network Watcher
-
-Migrating an Azure Network Watcher instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new Network Watcher instance in global Azure. Then, compare the results between the old and new environments.
-
-For more information:
--- Refresh your knowledge by completing the [Network Watcher tutorials](../network-watcher/index.yml).-- Review the [Network Watcher overview](../network-watcher/network-watcher-monitoring-overview.md).-- Learn about [Network security group flow logs](../network-watcher/network-watcher-nsg-flow-logging-portal.md).-- Read about [Connection Monitor](../network-watcher/connection-monitor.md).-
-## Site Recovery
-
-You can't migrate your current Azure Site Recovery setup to global Azure. You must set up a new Site Recovery solution in global Azure.
-
-For more information about Site Recovery and to learn how to migrate VMs from Azure Germany to global Azure, see [How to use Site Recovery](./germany-migration-compute.md#compute-iaas).
-
-Refresh your knowledge by completing these step-by-step tutorials:
--- [Azure-to-Azure disaster recovery](../site-recovery/azure-to-azure-about-networking.md)-- [VMware-to-Azure disaster recovery](../site-recovery/site-recovery-deployment-planner.md)-- [Hyper-V-to-Azure disaster recovery](../site-recovery/hyper-v-deployment-planner-overview.md)-
-## Azure policies
-
-You can't directly migrate policies from Azure Germany to global Azure. During a migration, the scope of assigned policies usually changes, especially when the subscription is different in the target environment, as it is in this scenario. However, you can preserve policy definitions and reuse them in global Azure.
-
-In the Azure CLI, run the following command to list all policies in your current environment.
-
-> [!NOTE]
-> Be sure to switch to the AzureGermanCloud environment in the Azure CLI before you run the following commands.
--
-```azurecli
-az policy definition list --query '[].{Type:policyType,Name:name}' --output table
-```
-
-Export only policies that have the **PolicyType** value **Custom**. Export **policyRule** to a file. The following example exports the custom policy "Allow Germany Central Only" (short version: `allowgconly`) to a file in the current folder:
-
-```azurecli
-az policy definition show --name allowgconly --output json --query policyRule > policy.json
-```
-
-Your export file will look similar to the following example:
-
-```json
-{
- "if": {
- "not": {
- "equals": "germanycentral",
- "field": "location"
- }
- },
- "then": {
- "effect": "Deny"
- }
-}
-```
-
-Next, switch to the global Azure environment. Modify the policy rule by editing the file. For example, change `germanycentral` to `westeurope`.
-
-```json
-{
- "if": {
- "not": {
- "equals": "westeurope",
- "field": "location"
- }
- },
- "then": {
- "effect": "Deny"
- }
-}
-```
-
-Create the new policy:
-
-```azurecli
-cat policy.json |az policy definition create --name "allowweonly" --rules @-
-```
-
-You now have a new policy named `allowweonly`. The policy allows only West Europe as the region.
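-
-If you work with PowerShell rather than the Azure CLI, a roughly equivalent sketch (assuming the edited policy.json is in the current folder) is:
-
-```azurepowershell
-# Sketch only: create the same custom policy definition with Azure PowerShell.
-New-AzPolicyDefinition -Name "allowweonly" -DisplayName "Allow West Europe Only" -Policy ".\policy.json"
-```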
-
-Assign the policy to the scopes in your new environment as appropriate. You can document the old assignments in Azure Germany by running the following command:
-
-```azurecli
-az policy assignment list
-```
-
-For more information:
--- Refresh your knowledge by completing the [Azure policies tutorial](../governance/policy/tutorials/create-and-manage.md).-- Learn how to [view policies by using the Azure CLI](../governance/policy/tutorials/create-and-manage.md#view-policy-definitions-with-azure-cli) or [view policies by using PowerShell](../governance/policy/tutorials/create-and-manage.md#view-policy-definitions-with-powershell).-- Learn how to [create a policy definition by using the Azure CLI](../governance/policy/tutorials/create-and-manage.md#create-a-policy-definition-with-azure-cli) or [create a policy definition by using PowerShell](../governance/policy/tutorials/create-and-manage.md#create-a-policy-definition-with-powershell).-
-## Frequently asked questions
-
-### Where can I move the backup data?
-
-You can move your backup data from Recovery Services Vaults (RSVs) in Germany Central (GEC) and Germany Northeast (GNE) to Germany West Central (GWC).
-
-### What backup data can I move?
-
-From June 21, 2021, you can move the backup data for the following workloads from one region to another:
--- Azure Virtual Machines-- Hybrid Workloads-- Files/folder backup using Microsoft Azure Recovery Services (MARS) Agent-- Data Protection Manager (DPM) server-- Azure Backup Server (MABS)-
-### How can I move backup data to another region?
-
-To ensure that data in the existing regions isn't lost, Azure Backup has enabled backup data move from GEC and GNE to GWC.
-
-While the migration happens, backups will stop in GEC and GNE. So, it is essential to protect the workloads in the new region before you start the migration operation.
-
-### What to do if the backup data move operation fails?
-
-The backup data move operation can fail due to the following error scenarios:
-
-| Error messages | Causes |
-| | |
-| Please provide an empty target vault. The target vault should not have any backup items or backup containers. | You have chosen a target vault that already has some protected items. |
-| Azure Backup data is only allowed to be moved to supported target regions. | You have chosen a target vault from a region that is not one of the supported regions for move. |
-
-You can retry the data move from scratch by running the same command (given below) with a new, empty target vault, or you can retry and move only the failed items from the source vault by indicating it with the `-RetryOnlyFailed` flag, as shown below.
-
-```azurepowershell
- if($validated) {
- $corr = Initialize-AzRecoveryServicesDSMove -SourceVault $srcVault -TargetVault $trgVault -RetryOnlyFailed
- }
-```
-
-### Is there a cost involved in moving this backup data?
-
-No. There's no additional cost for moving your backup data from one region to another; Azure Backup bears the cost of moving data across regions. Once the move operation is complete, you have a 10-day period with no billing. After this period, billing starts in the target vault.
-
-### If I face issues in moving backup data, whom should I contact?
-
-For any issues with backup data move from GEC or GNE to GWC, write to us at [GESupportAzBackup@microsoft.com](mailto:GESupportAzBackup@microsoft.com).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
--- [Compute](./germany-migration-compute.md)-- [Networking](./germany-migration-networking.md)-- [Storage](./germany-migration-storage.md)-- [Web](./germany-migration-web.md)-- [Databases](./germany-migration-databases.md)-- [Analytics](./germany-migration-analytics.md)-- [IoT](./germany-migration-iot.md)-- [Integration](./germany-migration-integration.md)-- [Identity](./germany-migration-identity.md)-- [Security](./germany-migration-security.md)-- [Media](./germany-migration-media.md)
germany Germany Migration Media https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-media.md
- Title: Migrate Azure media resources from Azure Germany to global Azure
-description: This article provides information about migrating your Azure media resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate media resources to global Azure
--
-This article has information that can help you migrate Azure media resources from Azure Germany to global Azure.
-
-## Media Services
-
-In Azure Media Services, you configure your own storage account and all media assets. First, create a new Media Services account in global Azure. Then, reload corresponding media artifacts and perform encoding and streaming under the new Media Services account.
-
-For more information:
--- Refresh your knowledge by completing the [Media Services tutorials](../media-services/previous/index.yml).-- Review the [Media Services overview](../media-services/previous/media-services-overview.md).-- Learn how to [create a Media Services account](../media-services/previous/media-services-portal-create-account.md).-
-## Media Player
-
-You can select multiple endpoints in Azure Media Player. You can stream your content from Azure Germany endpoints or global Azure endpoints.
-
-For more information, see [Azure Media Player](https://ampdemo.azureedge.net/azuremediaplayer.html).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
--- [Compute](./germany-migration-compute.md)-- [Networking](./germany-migration-networking.md)-- [Storage](./germany-migration-storage.md)-- [Web](./germany-migration-web.md)-- [Databases](./germany-migration-databases.md)-- [Analytics](./germany-migration-analytics.md)-- [IoT](./germany-migration-iot.md)-- [Integration](./germany-migration-integration.md)-- [Identity](./germany-migration-identity.md)-- [Security](./germany-migration-security.md)-- [Management tools](./germany-migration-management-tools.md)
germany Germany Migration Networking https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-networking.md
- Title: Migrate Azure network resource from Azure Germany to global Azure
-description: This article provides information about migrating your Azure network resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate network resources to global Azure
--
-Most networking services don't support migration from Azure Germany to global Azure. However, you can connect your networks in both cloud environments by using a site-to-site VPN.
--
-The steps you take to set up a site-to-site VPN between clouds are similar to the steps you take to deploy a site-to-site VPN between your on-premises network and Azure. Define a gateway in both clouds, and then tell the VPNs how to communicate with each other. [Create a site-to-site connection in the Azure portal](../vpn-gateway/tutorial-site-to-site-portal.md) describes the steps you complete to deploy a site-to-site VPN. Here's a summary of the steps:
-
-1. Define a virtual network.
-1. Define address space.
-1. Define subnets.
-1. Define a gateway subnet.
-1. Define a gateway for the virtual network.
-1. Define a gateway for the local network (your local VPN device).
-1. Configure a local VPN device.
-1. Build the connection.
-
-To connect virtual networks between global Azure and Azure Germany:
-
-1. Complete steps 1-5 in the preceding procedure in global Azure.
-1. Complete steps 1-5 in Azure Germany.
-1. Complete step 6 in global Azure:
- - Enter the public IP address of the VPN gateway in Azure Germany.
-1. Complete step 6 in Azure Germany:
- - Enter the public IP address of the VPN gateway in global Azure.
-1. Skip step 7.
-1. Complete step 8.
-
-## Virtual networks
-
-Migrating virtual networks from Azure Germany to global Azure isn't supported at this time. We recommend that you create new virtual networks in the target region and migrate resources into those virtual networks.
-
-For more information:
--- Refresh your knowledge by completing the [Azure Virtual Network tutorials](../virtual-network/index.yml).-- Review the [virtual networks overview](../virtual-network/virtual-networks-overview.md).-- Learn how to [plan virtual networks](../virtual-network/virtual-network-vnet-plan-design-arm.md).-
-## Network security groups
-
-Migrating network security groups from Azure Germany to global Azure isn't supported at this time. We recommend that you create new network security groups in the target region and apply the network security groups rules to the new application environment.
-
-Get the current configuration of any network security group from the portal or by running the following PowerShell commands:
-
-```powershell
-$nsg=Get-AzNetworkSecurityGroup -ResourceName <nsg-name> -ResourceGroupName <resourcegroupname>
-Get-AzNetworkSecurityRuleConfig -NetworkSecurityGroup $nsg
-```
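-
-To keep a record that you can work from when you rebuild the rules, one possible sketch (the output path is an assumption) is:
-
-```powershell
-# Sketch only: export all network security groups in a resource group, including their rules, to JSON.
-Get-AzNetworkSecurityGroup -ResourceGroupName <resourcegroupname> |
-    ConvertTo-Json -Depth 10 |
-    Out-File .\nsg-rules.json
-```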
-
-For more information:
--- Refresh your [knowledge about network security groups](../virtual-network/network-security-groups-overview.md#network-security-groups).-- Review the [network security overview](../virtual-network/network-security-groups-overview.md)-- Learn how to [manage network security groups](../virtual-network/manage-network-security-group.md).-
-## ExpressRoute
-
-Migrating an Azure ExpressRoute instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create new ExpressRoute circuits and a new ExpressRoute gateway in global Azure.
-
-For more information:
--- Refresh your knowledge by completing the [ExpressRoute tutorials](../expressroute/index.yml).-- Learn how to [create a new ExpressRoute gateway](../expressroute/expressroute-howto-add-gateway-portal-resource-manager.md).-- Learn about [ExpressRoute locations and service providers](../expressroute/expressroute-locations.md).-- Read about [virtual network gateways for ExpressRoute](../expressroute/expressroute-about-virtual-network-gateways.md).-
-## VPN Gateway
-
-Migrating an Azure VPN Gateway instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new instance of VPN Gateway in global Azure.
-
-You can collect information about your current VPN Gateway configuration by using the portal or PowerShell. In PowerShell, use a set of cmdlets that begin with `Get-AzVirtualNetworkGateway*`.
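-
-For example, a minimal sketch that documents the existing gateways in a resource group (the resource group name and output path are placeholders):
-
-```powershell
-# Sketch only: save the configuration of all virtual network gateways in a resource group to JSON.
-Get-AzVirtualNetworkGateway -ResourceGroupName <resourcegroupname> |
-    ConvertTo-Json -Depth 10 |
-    Out-File .\vpn-gateways.json
-```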
-
-Make sure that you update your on-premises configuration. Also, delete any existing rules for the old IP address ranges after you update your Azure network environment.
-
-For more information:
--- Refresh your knowledge by completing the [VPN Gateway tutorials](../vpn-gateway/index.yml).-- Learn how to [create a site-to-site connection](../vpn-gateway/tutorial-site-to-site-portal.md).-- Review the [Get-AzVirtualNetworkGateway](/powershell/module/az.network/get-azvirtualnetworkgateway) PowerShell cmdlets.-- Read the blog post [Create a site-to-site connection](/archive/blogs/ralfwi/connecting-clouds).
-
-## Application Gateway
-
-Migrating an Azure Application Gateway instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new gateway in global Azure.
-
-You can collect information about your current gateway configuration by using the portal or PowerShell. In PowerShell, use a set of cmdlets that begin with `Get-AzApplicationGateway*`.
-
-For more information:
--- Refresh your knowledge by completing the [Application Gateway tutorials](../web-application-firewall/ag/application-gateway-web-application-firewall-portal.md).-- Learn how to [create an application gateway](../application-gateway/quick-create-portal.md).-- Review the [Get-AzApplicationGateway](/powershell/module/az.network/get-azapplicationgateway) PowerShell cmdlets.-
-## DNS
-
-To migrate your Azure DNS configuration from Azure Germany to global Azure, export the DNS zone file, and then import it under the new subscription. Currently, the only way to export the zone file is by using the Azure CLI.
-
-After you sign in to your source subscription in Azure Germany, configure the Azure CLI to use Azure Resource Manager mode. Export the zone by running this command:
-
-```azurecli
-az network dns zone export -g <resource group> -n <zone name> -f <zone file name>
-```
-
-Example:
-
-```azurecli
-az network dns zone export -g "myresourcegroup" -n "contoso.com" -f "contoso.com.txt"
-```
-
-This command calls the Azure DNS service to export the zone `contoso.com` in the resource group `myresourcegroup`. The output is stored as a BIND-compatible zone file in contoso.com.txt in the current folder.
-
-When the export is finished, delete the NS records from the zone file. New NS records are created for the new region and subscription.
-
-Now, sign in to your target environment, create a new resource group (or select an existing one), and then import the zone file:
-
-```azurecli
-az network dns zone import -g <resource group> -n <zone name> -f <zone file name>
-```
-
-When the zone has been imported, you must validate the zone by running the following command:
-
-```azurecli
-az network dns record-set list -g <resource group> -z <zone name>
-```
-
-When validation is finished, contact your domain registrar and redelegate the NS records. To get NS record information, run this command:
-
-```azurecli
-az network dns record-set ns list -g <resource group> -z <zone name> --output json
-```
-
-For more information:
--- Refresh your knowledge by completing the [Azure DNS tutorials](../dns/index.yml).-- Review the [Azure DNS overview](../dns/dns-overview.md).-- Learn more about [Azure DNS import and export](../dns/dns-import-export.md).-
-## Network Watcher
-
-Migrating an Azure Network Watcher instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new Network Watcher instance in global Azure. Afterward, compare results between the old and new environments.
-
-For more information:
--- Refresh your knowledge by completing the [Network Watcher tutorials](../network-watcher/index.yml).-- Review the [Network Watcher overview](../network-watcher/network-watcher-monitoring-overview.md).-- Learn more about [network security group flow logs](../network-watcher/network-watcher-nsg-flow-logging-portal.md).-- Read about [Connection Monitor](../network-watcher/connection-monitor.md).-
-## Traffic Manager
-
-Azure Traffic Manager can help you complete a smooth migration. However, you can't migrate Traffic Manager profiles that you create in Azure Germany to global Azure. (During a migration, you migrate Traffic Manager endpoints to the target environment, so you need to update the Traffic Manager profile anyway.)
-
-You can define additional endpoints in the target environment by using Traffic Manager while it's still running in the source environment. When Traffic Manager is running in the new environment, you can still define endpoints that you haven't yet migrated in the source environment. This scenario is known as the [Blue-Green scenario](https://azure.microsoft.com/blog/blue-green-deployments-using-azure-traffic-manager/). The scenario involves the following steps:
-
-1. Create a new Traffic Manager profile in global Azure.
-1. Define the endpoints in Azure Germany.
-1. Change your DNS CNAME record to the new Traffic Manager profile.
-1. Turn off the old Traffic Manager profile.
-1. Migrate and configure endpoints. For each endpoint in Azure Germany:
- 1. Migrate the endpoint to global Azure.
- 1. Change the Traffic Manager profile to use the new endpoint.
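-
-For illustration only, here's a minimal PowerShell sketch of steps 1 and 2. The profile, DNS, and endpoint names are hypothetical, and Weighted routing is just one reasonable choice for shifting traffic during a migration; match the settings of your existing profile.
-
-```powershell
-# Create the new profile in global Azure (hypothetical names).
-New-AzTrafficManagerProfile -Name "myMigrationProfile" -ResourceGroupName "myResourceGroup" `
-    -TrafficRoutingMethod Weighted -RelativeDnsName "mymigrationdns" -Ttl 30 `
-    -MonitorProtocol HTTPS -MonitorPort 443 -MonitorPath "/"
-
-# Add an endpoint that still runs in Azure Germany as an external endpoint.
-New-AzTrafficManagerEndpoint -Name "germanyEndpoint" -ProfileName "myMigrationProfile" `
-    -ResourceGroupName "myResourceGroup" -Type ExternalEndpoints `
-    -Target "myapp.azurewebsites.de" -EndpointStatus Enabled -Weight 1
-```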
-
-For more information:
-
-- Refresh your knowledge by completing the [Traffic Manager tutorials](../traffic-manager/index.yml).
-- Review the [Traffic Manager overview](../traffic-manager/traffic-manager-overview.md).
-- Learn how to [create a Traffic Manager profile](../traffic-manager/quickstart-create-traffic-manager-profile.md).
-
-## Load Balancer
-
-Migrating an Azure Load Balancer instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new load balancer in global Azure.
-
-For more information:
-
-- Refresh your knowledge by completing the [Load Balancer tutorials](../load-balancer/index.yml).
-- Review the [Load Balancer overview](../load-balancer/load-balancer-overview.md).
-- Learn how to [create a new load balancer](../load-balancer/quickstart-load-balancer-standard-public-portal.md).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Compute](./germany-migration-compute.md)
-- [Storage](./germany-migration-storage.md)
-- [Web](./germany-migration-web.md)
-- [Databases](./germany-migration-databases.md)
-- [Analytics](./germany-migration-analytics.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Security](./germany-migration-security.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Migration Security https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-security.md
- Title: Migrate Azure security resources, Azure Germany to global Azure
-description: This article provides information about migrating your Azure security resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate security resources to global Azure
--
-This article has information that can help you migrate Azure security resources from Azure Germany to global Azure.
--
-## Azure Active Directory
-
-For information about migrating Azure Active Directory, see [Migrate identities](./germany-migration-identity.md#azure-active-directory).
-
-## Key Vault
-
-Some features of Azure Key Vault can't be migrated from Azure Germany to global Azure.
-
-### Encryption keys
-
-You can't migrate encryption keys. Create new keys in the target region, and then use the keys to protect the target resource (for example, Azure Storage or Azure SQL Database). Securely migrate the data from the old region to the new region.
-
-### Application secrets
-
-Application secrets are certificates, storage account keys, and other application-related secrets. During a migration, first create a new key vault in global Azure. Then, complete one of the following actions:
-
-- Create new application secrets.
-- Read the current secrets in Azure Germany, and then enter the value in the new vault.
-
-```powershell
-Get-AzKeyVaultSecret -vaultname mysecrets -name Deploydefaultpw
-```
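-
-As a sketch of the second option (the vault names are examples only), you can read each secret from the Azure Germany vault and write it to the new vault after switching to your global Azure subscription:
-
-```powershell
-# While connected to Azure Germany: read the secret (SecretValue is a SecureString).
-$secret = Get-AzKeyVaultSecret -VaultName "mysecrets" -Name "Deploydefaultpw"
-
-# After connecting to your global Azure subscription: write it to the new vault.
-Set-AzKeyVaultSecret -VaultName "mynewsecrets" -Name "Deploydefaultpw" -SecretValue $secret.SecretValue
-```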
-
-For more information:
-
-- Refresh your knowledge by completing the [Key Vault tutorials](../key-vault/index.yml).
-- Review the [Key Vault overview](../key-vault/general/overview.md).
-- Review the [Key Vault PowerShell cmdlets](/powershell/module/az.keyvault/).
-
-## VPN Gateway
-
-Migrating an Azure VPN Gateway instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new instance of VPN Gateway in global Azure.
-
-You can collect information about your current VPN Gateway configuration by using the portal or PowerShell. In PowerShell, use a set of cmdlets that begin with `Get-AzVirtualNetworkGateway*`.
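-
-For example, a minimal sketch (resource names are placeholders) that captures the existing gateway and its connections so you can recreate them in global Azure:
-
-```powershell
-# Placeholder names; replace with your own resource group and gateway.
-Get-AzVirtualNetworkGateway -ResourceGroupName "myResourceGroup" -Name "myVpnGateway"
-
-# List the gateway connections so you can recreate them against the new gateway.
-Get-AzVirtualNetworkGatewayConnection -ResourceGroupName "myResourceGroup"
-```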
-
-Make sure that you update your on-premises configuration. Also, delete any existing rules for the old IP address ranges after you update your Azure network environment.
-
-For more information:
-
-- Refresh your knowledge by completing the [VPN Gateway tutorials](../vpn-gateway/index.yml).
-- Learn how to [create a site-to-site connection](../vpn-gateway/tutorial-site-to-site-portal.md).
-- Review the [Get-AzVirtualNetworkGateway](/powershell/module/az.network/get-azvirtualnetworkgateway) PowerShell cmdlets.
-- Read the blog post [Create a site-to-site connection](/archive/blogs/ralfwi/connecting-clouds).
-
-## Application Gateway
-
-Migrating an Azure Application Gateway instance from Azure Germany to global Azure isn't supported at this time. We recommend that you create and configure a new gateway in global Azure.
-
-You can collect information about your current gateway configuration by using the portal or PowerShell. In PowerShell, use a set of cmdlets that begin with `Get-AzApplicationGateway*`.
-
-For more information:
-
-- Refresh your knowledge by completing the [Application Gateway tutorials](../web-application-firewall/ag/application-gateway-web-application-firewall-portal.md).
-- Learn how to [create an application gateway](../application-gateway/quick-create-portal.md).
-- Review the [Get-AzApplicationGateway](/powershell/module/az.network/get-azapplicationgateway) PowerShell cmdlets.
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Compute](./germany-migration-compute.md)
-- [Networking](./germany-migration-networking.md)
-- [Storage](./germany-migration-storage.md)
-- [Web](./germany-migration-web.md)
-- [Databases](./germany-migration-databases.md)
-- [Analytics](./germany-migration-analytics.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Migration Storage https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-storage.md
- Title: Migrate Azure storage resource from Azure Germany to global Azure
-description: This article provides information about migrating your Azure storage resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate storage resources to global Azure
--
-This article has information that can help you migrate Azure storage resources from Azure Germany to global Azure.
--
-## Blobs
-
-AzCopy is a free tool you can use to copy blobs, files, and tables. AzCopy works for Azure-to-Azure, on-premises-to-Azure, and Azure-to-on-premises migrations. Use AzCopy for your migration to copy blobs directly from Azure Germany to global Azure.
-
-If you don't use managed disks for your source VM, use AzCopy to copy the .vhd files to the target environment. Otherwise, you must complete some steps in advance. For more information, see [Recommendations for managed disks](#managed-disks).
-
-The following example shows how AzCopy works. For a complete reference, see the [AzCopy documentation](../storage/common/storage-use-azcopy-v10.md).
-
-AzCopy uses the terms **Source** and **Dest**, expressed as URIs. URIs for Azure Germany always have this format:
-
-```http
-https://<storageaccountname>.blob.core.cloudapi.de/<containername>/<blobname>
-```
-
-URIs for global Azure always have this format:
-
-```http
-https://<storageaccountname>.blob.core.windows.net/<containername>/<blobname>
-```
-
-You get the three parts of the URI (*storageaccountname*, *containername*, *blobname*) from the portal, by using PowerShell, or by using the Azure CLI. The name of the blob can be part of the URI or it can be given as a pattern, like *vm121314.vhd*.
-
-You also need the storage account keys to access the Azure Storage account. Get them from the portal, by using PowerShell, or by using the CLI. For example:
-
-```powershell
-Get-AzStorageAccountKey -Name <saname> -ResourceGroupName <rgname>
-```
-
-As always, you need only one of the two keys for each storage account.
-
-Example:
-
-URI part | Example value
--- | ---
-Source storageAccount | `migratetest`
-Source container | `vhds`
-Source blob | `vm-121314.vhd`
-Target storageAccount | `migratetarget`
-Target container | `targetcontainer`
-
-This command copies a virtual hard disk from Azure Germany to global Azure (keys are shortened to improve readability):
-
-```cmd
-azcopy -v /source:https://migratetest.blob.core.cloudapi.de/vhds /sourcekey:"0LN...w==" /dest:https://migratetarget.blob.core.windows.net/targetcontainer /DestKey:"o//ucDi5TN...w==" /Pattern:vm-121314.vhd
-```
-
-To get a consistent copy of the VHD, shut down the VM before you copy the VHD. Plan some downtime for the copy activity. When the VHD is copied, [rebuild your VM in the target environment](../backup/backup-azure-vms-automation.md#create-a-vm-from-restored-disks).
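-
-For example, you might stop and deallocate the source VM from PowerShell before you start the copy (the names here are placeholders):
-
-```powershell
-# -Force skips the confirmation prompt; the VM is deallocated, so the VHD stays consistent during the copy.
-Stop-AzVM -ResourceGroupName "myResourceGroup" -Name "vm-121314" -Force
-```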
-
-For more information:
-
-- Review the [AzCopy documentation](../storage/common/storage-use-azcopy-v10.md).
-- Learn how to [create a VM from restored disks](../backup/backup-azure-vms-automation.md#create-a-vm-from-restored-disks).
-
-## Managed Disks
-
-Azure Managed Disks simplifies disk management for Azure infrastructure as a service (IaaS) VMs by managing the storage accounts that are associated with the VM disk.
-
-Because you don't have direct access to the .vhd file, you can't directly use tools like AzCopy to copy your files (see [Blobs](#blobs)). The workaround is to first export the managed disk by getting a temporary shared access signature URI, and then download it or copy it by using this information. The following sections show an example of how to get the shared access signature URI and what to do with it.
-
-### Step 1: Get the shared access signature URI
-
-1. In the portal, search for your managed disk. (It's in the same resource group as your VM. The resource type is **Disk**.)
-1. On the **Overview** page, select the **Export** button in the top menu (you have to shut down and deallocate your VM first, or detach the disk from the VM).
-1. Define a time for the URI to expire (the default is 3,600 seconds).
-1. Generate a URL (this step should take only a few seconds).
-1. Copy the URL (it appears only once).
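-
-If you prefer the command line over the portal, a sketch of the same export step in PowerShell (the disk and resource group names are placeholders):
-
-```powershell
-# Generate a temporary, read-only shared access signature URI for the managed disk.
-$sas = Grant-AzDiskAccess -ResourceGroupName "myResourceGroup" -DiskName "myManagedDisk" `
-    -Access Read -DurationInSecond 3600
-
-# $sas.AccessSAS contains the URI to use as the AzCopy source.
-$sas.AccessSAS
-```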
-
-### Step 2: AzCopy
-
-For examples of how to use AzCopy, see [Blobs](#blobs). Use AzCopy (or a similar tool) to copy the disk directly from your source environment to the target environment. In AzCopy, you have to split the URI into the base URI and the shared access signature part. The shared access signature part of the URI begins with the character "**?**". The portal provides a shared access signature URI similar to this one:
-
-```http
-https://md-kp4qvrzhj4j5.blob.core.cloudapi.de/r0pmw4z3vk1g/abcd?sv=2017-04-17&sr=b&si=22970153-4c56-47c0-8cbb-156a24b6e4b5&sig=5Hfu0qMw9rkZf6mCjuCE4VMV6W3IR8FXQSY1viji9bg%3D
-```
-
-The following commands show the source parameters for AzCopy:
-
-```cmd
-/source:"https://md-kp4qvrzhj4j5.blob.core.cloudapi.de/r0pmw4z3vk1g/abcd"
-```
-
-```cmd
-/sourceSAS:"?sv=2017-04-17&sr=b&si=22970153-4c56-47c0-8cbb-156a24b6e4b5&sig=5Hfu0qMw9rkZf6mCjuCE4VMV6W3IR8FXQSY1viji9bg%3D"
-```
-
-Here's the complete command:
-
-```cmd
-azcopy -v /source:"https://md-kp4qvrzhj4j5.blob.core.cloudapi.de/r0pmw4z3vk1g/abcd" /sourceSAS:"?sv=2017-04-17&sr=b&si=22970153-4c56-47c0-8cbb-156a24b6e4b5&sig=5Hfu0qMw9rkZf6mCjuCE4VMV6W3IR8FXQSY1viji9bg%3D" /dest:"https://migratetarget.blob.core.windows.net/targetcontainer/newdisk.vhd" /DestKey:"o//ucD... Kdpw=="
-```
-
-### Step 3: Create a new managed disk in the target environment
-
-You have several options for creating a new managed disk. Here's how to do it in the Azure portal:
-
-1. In the portal, select **New** > **Managed Disk** > **Create**.
-1. Enter a name for the new disk.
-1. Select a resource group.
-1. Under **Source type**, select **Storage blob**. Then, either copy the destination URI from the AzCopy command or browse to select the destination URI.
-1. If you copied an OS disk, select the **OS** type. For other disk types, you can skip this setting. Then select **Create**.
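-
-Equivalently, a rough PowerShell sketch of this step (the names and location are placeholders; add -StorageAccountId to the disk configuration if the source blob lives in a different subscription):
-
-```powershell
-# Build a disk configuration that imports the copied VHD blob.
-$diskConfig = New-AzDiskConfig -Location "westus" -CreateOption Import `
-    -SourceUri "https://migratetarget.blob.core.windows.net/targetcontainer/newdisk.vhd"
-
-# Create the managed disk from that configuration.
-New-AzDisk -ResourceGroupName "myResourceGroup" -DiskName "newdisk" -Disk $diskConfig
-```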
-
-### Step 4: Create the VM
-
-As noted earlier, there are multiple ways to create a VM by using this new managed disk. Here are two options:
-
-- In the portal, select the disk, and then select **Create VM**. Define the other parameters of your VM as usual.
-- For PowerShell, see [Create a VM from restored disks](../backup/backup-azure-vms-automation.md#create-a-vm-from-restored-disks).
-
-For more information:
-
-- Learn how to export a disk [via API](/rest/api/compute/disks/grantaccess) by getting a shared access signature URI.
-- Learn how to create a managed disk [via API](/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-importing-an-unmanaged-blob-from-a-different-subscription.) from an unmanaged blob.
-
-## Tables
-
-You can migrate tables by using Azure Storage Explorer, a tool for managing your Azure cloud storage resources. Use Storage Explorer to connect to the source Azure Germany storage account and copy tables to the target global Azure storage account.
-
-To begin, install [Azure Storage Explorer](https://azure.microsoft.com/features/storage-explorer/).
-
-### Connect to source
-
-You use Storage Explorer to copy tables from the source Azure Storage account.
-
-Connect Storage Explorer to your source table resources in Microsoft Azure Germany. You can [sign in to access resources in your subscription](../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#sign-in-to-azure) or you can [attach to specific Storage resources](../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#attach-to-an-individual-resource).
-
-### Connect to target
-
-You use Storage Explorer to paste tables to the target Azure Storage account.
-
-Connect Storage Explorer to your target Microsoft Azure subscription or Azure Storage. You can [sign in to access resources in your subscription](../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#sign-in-to-azure) or you can [attach to specific Storage resources](../vs-azure-tools-storage-manage-with-storage-explorer.md?tabs=windows#attach-to-an-individual-resource).
--
-### Migrate tables
-
-Copy tables from Azure Germany to global Azure by using Storage Explorer. You can copy tables by right-clicking the table you want to copy and choosing **Copy table** from the shortcut menu. The following example shows copying the *testmigrationtable* from an *Azure Germany subscription*.
-
-![Copy table menu selected from Azure Germany subscription](./media/germany-migration-storage/copy-table.png)
-
-Paste the table into the target Azure Storage account by using Storage Explorer. You can paste tables by right-clicking the *Tables* node within the target Azure Storage account. The following example shows pasting the *testmigrationtable* to a connected Azure Storage account.
-
-![Paste table menu selected from target Azure Storage](./media/germany-migration-storage/paste-table.png)
-
-Repeat the copy and paste steps for each table you want to migrate.
-
-## File shares
-
-Use AzCopy for your migration to copy file shares directly from Azure Germany to global Azure. AzCopy is a free tool you can use to copy blobs, files, and tables.
-
-To begin, [download AzCopy](https://aka.ms/downloadazcopy) and install it.
-
-AzCopy uses the terms **Source** and **Dest**, expressed as URIs. URIs for Azure Germany always have this format:
-
-```http
-https://<storageaccountname>.file.core.cloudapi.de/<filesharename>
-```
-
-URIs for global Azure always have this format:
-
-```http
-https://<storageaccountname>.file.core.windows.net/<filesharename>
-```
-You need a storage account SAS token to access the Azure Storage account.
-
-The following example command copies all file shares, directories, and files from an Azure Germany storage account to a global Azure storage account. For a complete reference, see the [AzCopy documentation](../storage/common/storage-use-azcopy-v10.md).
-
-URI part | Example value
--- | ---
-Source storageAccount | `migratetest`
-Source file share | `sourcefileshare`
-Target storageAccount | `migratetarget`
-Target fileshare | `targetfileshare`
-
-```cmd
-azcopy copy "https://migratetest.file.core.cloudapi.de/sourcefileshare?<SAS-token>" "https://migratetarget.file.core.windows.net/targetfileshare?<SAS-token>" --recursive=true
-```
-
-For more information about AzCopy, see the [AzCopy documentation](../storage/common/storage-use-azcopy-v10.md) and [Transfer data with AzCopy and file storage](../storage/common/storage-use-azcopy-files.md#copy-files-between-storage-accounts).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Compute](./germany-migration-compute.md)
-- [Networking](./germany-migration-networking.md)
-- [Web](./germany-migration-web.md)
-- [Databases](./germany-migration-databases.md)
-- [Analytics](./germany-migration-analytics.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Security](./germany-migration-security.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Migration Web https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-migration-web.md
- Title: Migrate Azure web resources from Azure Germany to global Azure
-description: This article provides information about migrating your Azure web resources from Azure Germany to global Azure.
- Previously updated : 10/16/2020------
-# Migrate web resources to global Azure
--
-This article has information that can help you migrate Azure web resources from Azure Germany to global Azure.
-
-## Web Apps
-
-Migrating apps that you created by using the Web Apps feature of Azure App Service from Azure Germany to global Azure isn't supported at this time. We recommend that you export a web app as an Azure Resource Manager template. Then, redeploy after you change the location property to the new destination region.
-
-> [!IMPORTANT]
-> Change the location, Azure Key Vault secrets, certificates, and other GUIDs to be consistent with the new region.
-
-### Migrate Web App resource
-
-1. [Export Web App and App Service plan as a template](../azure-resource-manager/templates/export-template-portal.md) from your Azure Germany subscription. Select the resources you want to migrate in your web app resource group and export as a template.
-1. Download the template as a zip file.
-1. Edit the location property in the **template.json** file to the target Azure global region. For example, the following JSON file has a target location of *West US*.
-
- ```json
- "resources": [
- {
- "type": "Microsoft.Web/serverfarms",
- "apiVersion": "2018-02-01",
- "name": "[parameters('serverfarms_myappservice_name')]",
- "location": "West US",
-
- ```
-1. Deploy the modified template to global Azure. For example, you can use the Azure CLI to deploy it.
-
- ```azurecli
- az deployment group create --name "<deployment name>" \
- --resource-group "<resource group name>" \
- --template-file "<path of your template.json file>"
- ```
-
-### Migrate Web App content
-
-1. In the Azure Germany portal, select your Web App.
-1. Select **Development Tools > Advanced Tools**.
-1. From the top menu, select **Debug console** then choose **PowerShell**.
-1. Select **site**.
-1. Select the **download icon** beside the **wwwroot** folder. The downloaded zip file contains the source code of your web app.
-1. Deploy the web root to the migrated global Azure web app. For example, you can use the following Azure CLI command.
-
- ```azurecli
- az webapp deployment source config-zip \
- --resource-group "<resource group name>" \
- --name "<web app name>" \
- --src "<path to webroot folder zip file>"
- ```
-
-For more information:
-
-- Refresh your knowledge by completing the [App Service tutorials](../app-service/tutorial-dotnetcore-sqldb-app.md).
-- Get information about how to [export Azure Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md).
-- Review the [Azure Resource Manager overview](../azure-resource-manager/management/overview.md).
-- Review the [App Service overview](../app-service/overview.md).
-- Get an [overview of Azure locations](https://azure.microsoft.com/global-infrastructure/locations/).
-- Learn how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).
-
-## Notification Hubs
-
-To migrate settings from one Azure Notification Hubs instance to another instance, export and import all registration tokens with their tags:
-
-1. [Export the existing notification hub registrations](/previous-versions/azure/azure-services/dn790624(v=azure.100)) to an Azure Blob storage container.
-1. Create a new notification hub in the target environment.
-1. [Import your registration tokens](/previous-versions/azure/azure-services/dn790624(v=azure.100)) from Blob storage to your new notification hub.
-
-For more information:
-
-- Refresh your knowledge by completing the [Notification Hubs tutorials](../notification-hubs/notification-hubs-android-push-notification-google-fcm-get-started.md).
-- Review the [Notification Hubs overview](../notification-hubs/notification-hubs-push-notification-overview.md).
-
-## Event Hubs
-
-To migrate an Azure event hub, export the Event Hubs resource template from Azure Germany, and then deploy the template to global Azure.
-
-1. [Export Event Hub as a template](../azure-resource-manager/templates/export-template-portal.md) from your Azure Germany subscription.
-1. [Deploy Event Hub template as a custom template](../azure-resource-manager/templates/deploy-portal.md#deploy-resources-from-custom-template) to your global Azure subscription. Load and deploy the template you exported from your Azure Germany subscription.
-
-For more information:
-
-- Review the [Event Hubs overview](../event-hubs/event-hubs-about.md).
-- Review the [Azure Resource Manager overview](../azure-resource-manager/management/overview.md).
-- Get information about how to [export Azure Resource Manager templates](../azure-resource-manager/templates/export-template-portal.md).
-- Learn how to [redeploy a template](../azure-resource-manager/templates/deploy-powershell.md).
-
-## Next steps
-
-Learn about tools, techniques, and recommendations for migrating resources in the following service categories:
-
-- [Compute](./germany-migration-compute.md)
-- [Networking](./germany-migration-networking.md)
-- [Storage](./germany-migration-storage.md)
-- [Databases](./germany-migration-databases.md)
-- [Analytics](./germany-migration-analytics.md)
-- [IoT](./germany-migration-iot.md)
-- [Integration](./germany-migration-integration.md)
-- [Identity](./germany-migration-identity.md)
-- [Security](./germany-migration-security.md)
-- [Management tools](./germany-migration-management-tools.md)
-- [Media](./germany-migration-media.md)
germany Germany Overview Data Trustee https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-overview-data-trustee.md
- Title: Azure Germany data trustee | Microsoft Docs
-description: This article provides information about the data trustee model. You will also find relevant links.
- Previously updated : 10/16/2020------
-# Data trustee principle
--
-Microsoft developed Azure Germany to help meet the needs of customers and prospective customers in the European Union (EU), the European Free Trade Association (EFTA), and the UK. These needs include addressing concerns about security and privacy of online data. Azure Germany comprises industry-leading cloud services *hosted and operated entirely under special security measures in Germany.*
-
-A local company controls access to customer data, except when the customer or end users grant access themselves. This model ensures that the data owners keep
-sovereignty and decision-making power over their data--especially against third parties such as domestic or foreign supervisory and law enforcement authorities. These authorities can access customer data only in regulated, compliant cases under the requirements of German law.
-
-Azure Germany offers a separate instance of Microsoft Azure services from within German datacenters. The datacenters are in two locations, Frankfurt/Main and Magdeburg. This placement ensures that customer data remains in Germany and that the datacenters connect to each other through a
-private network. All customer data is exclusively stored in those datacenters. A designated German company--the German data trustee--controls access to customer data and the systems and infrastructure that hold customer data.
-
-The German data trustee is an independent company that is headquartered, incorporated, owned, and controlled in Germany and is subject to German law. For Microsoft Azure Germany, T-Systems International, a subsidiary company of Deutsche Telekom, has been contracted as the data trustee. The main characteristic of the data trustee model is that Microsoft is granted access to customer data only in contractually compliant cases by and under the supervision of the data trustee or the customer. At the same time, Microsoft Azure Germany integrates state-of-the-art security measures and global standards for datacenters.
-
-Microsoft administers all aspects of operation and provisioning of Azure Germany services that do not require access to customer data. It ensures that there is no connection with other Microsoft global cloud services. Microsoft remains responsible to its customers for service level agreements (SLAs), and for most operational aspects that do not enable access to customer data.
-
-In contrast, the German data trustee is responsible for all tasks and processes that require physical or logical access to infrastructure in
-Germany that stores customer data. The data trustee controls all access to customer data other than access initiated by the customer or end users.
-
-The following features are characteristic of Azure Germany:
-
-* All data that the customer provides through its use of Azure Germany is stored in German datacenters only.
-* Physical access to the datacenters is monitored and controlled through a well-known German data trustee (T-Systems International).
-* Depending on the service used, data is constantly synchronized between the datacenters to ensure continuity of business processes and to enable emergency recovery.
-* Access to customer data is under the control of the data trustee.
-* The data trustee operates under German law.
-
-For more information, see the [Microsoft trustee compliance model](https://gallery.technet.microsoft.com/Cloud-Germany-Compliance-4161d8df).
germany Germany Services Compute https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-compute.md
- Title: Azure Germany compute services | Microsoft Docs
-description: This topic provides a detailed comparison of compute services for Azure Germany.
- Previously updated : 10/16/2020------
-# Azure Germany compute services
--
-## Virtual Machines
-For details on the Azure Virtual Machines service and how to use it, see [Sizes for Windows virtual machines in Azure](../virtual-machines/sizes.md?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
-
-### Variations
-These products (virtual machines) are available in Azure Germany:
-
-| Products | Germany Central | Germany Northeast |
-| | | |
-| Basic_A0 | Y | Y |
-| Basic_A1 | Y | Y |
-| Basic_A2 | Y | Y |
-| Basic_A3 | Y | Y |
-| Basic_A4 | Y | Y |
-| | | |
-| Standard_A1_v2 | Y | Y |
-| Standard_A2_v2 | Y | Y |
-| Standard_A4_v2 | Y | Y |
-| Standard_A8_v2 | Y | Y |
-| Standard_A2m_v2 | Y | Y |
-| Standard_A4m_v2 | Y | Y |
-| Standard_A8m_v2 | Y | Y |
-| | | |
-| Standard_D1_v2 | Y | Y |
-| Standard_D2_v2 | Y | Y |
-| Standard_D3_v2 | Y | Y |
-| Standard_D4_v2 | Y | Y |
-| Standard_D5_v2 | Y | Y |
-| Standard_DS1_v2 | Y | Y |
-| Standard_DS2_v2 | Y | Y |
-| Standard_DS3_v2 | Y | Y |
-| Standard_DS4_v2 | Y | Y |
-| Standard_DS5_v2 | Y | Y |
-| | | |
-| Standard_D1 | N | N |
-| Standard_D2 | N | N |
-| Standard_D3 | N | N |
-| Standard_D4 | N | N |
-| Standard_DS1 | N | N |
-| Standard_DS2 | N | N |
-| Standard_DS3 | N | N |
-| Standard_DS4 | N | N |
-| | | |
-| Standard_A0 | Y | Y |
-| Standard_A1 | Y | Y |
-| Standard_A2 | Y | Y |
-| Standard_A3 | Y | Y |
-| Standard_A5 | Y | Y |
-| Standard_A4 | Y | Y |
-| Standard_A6 | Y | Y |
-| Standard_A7 | Y | Y |
-| | | |
-| Standard_F1 | Y | Y |
-| Standard_F2 | Y | Y |
-| Standard_F4 | Y | Y |
-| Standard_F8 | Y | Y |
-| Standard_F16 | Y | Y |
-| Standard_F1s | Y | Y |
-| Standard_F2s | Y | Y |
-| Standard_F4s | Y | Y |
-| Standard_F8s | Y | Y |
-| Standard_F16s | Y | Y |
-| | | |
-| Standard_D11_v2 | Y | Y |
-| Standard_D12_v2 | Y | Y |
-| Standard_D13_v2 | Y | Y |
-| Standard_D14_v2 | Y | Y |
-| Standard_D15_v2 | Y | Y |
-| Standard_DS11_v2 | Y | Y |
-| Standard_DS12_v2 | Y | Y |
-| Standard_DS13_v2 | Y | Y |
-| Standard_DS14_v2 | Y | Y |
-| Standard_DS15_v2 | Y | Y |
-| | | |
-| Standard_D11 | N | N |
-| Standard_D12 | N | N |
-| Standard_D13 | N | N |
-| Standard_D14 | N | N |
-| Standard_DS11 | N | N |
-| Standard_DS12 | N | N |
-| Standard_DS13 | N | N |
-| Standard_DS14 | N | N |
-| | | |
-| Standard_G1 | Y | N |
-| Standard_G2 | Y | N |
-| Standard_G3 | Y | N |
-| Standard_G4 | Y | N |
-| Standard_G5 | Y | N |
-| Standard_GS1 | Y | N |
-| Standard_GS2 | Y | N |
-| Standard_GS3 | Y | N |
-| Standard_GS4 | Y | N |
-| Standard_GS5 | Y | N |
-| | | |
-| Standard_L4 | N | N |
-| Standard_L8 | N | N |
-| Standard_L16 | N | N |
-| Standard_L32 | N | N |
-| | | |
-| Standard_NC6 | N | N |
-| Standard_NC12 | N | N |
-| Standard_NC24 | N | N |
-| Standard_NC24r | N | N |
-| | | |
-| Standard_NV6 | N | N |
-| Standard_NV12 | N | N |
-| Standard_NV24 | N | N |
-| | | |
-| Standard_H8 | N | N |
-| Standard_H16 | N | N |
-| Standard_H8m | N | N |
-| Standard_H16m | N | N |
-| Standard_H16mr | N | N |
-| Standard_H16r | N | N |
-| | | |
-| Standard_A8 | N | N |
-| Standard_A9 | N | N |
-| Standard_A10 | N | N |
-| Standard_A11 | N | N |
---
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services Database https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-database.md
- Title: Azure Germany database services | Microsoft Docs
-description: This article provides a comparison of SQL database services for Azure Germany.
- Previously updated : 10/16/2020------
-# Azure Germany database services
--
-## SQL Database
-Azure SQL Database and Azure SQL Managed Instance are generally available in Azure Germany. For guidance on metadata visibility configuration and protection best practices, see the [Microsoft Security Center for SQL Database Engine](/sql/relational-databases/security/security-center-for-sql-server-database-engine-and-azure-sql-database) as well as the [SQL Database global documentation](../azure-sql/database/index.yml) and the [SQL Managed Instance global documentation](../azure-sql/managed-instance/index.yml).
-
-### Variations
-The address for SQL Database in Azure Germany is different from the address in global Azure:
-
-| Service type | Global Azure | Azure Germany |
-| | | |
-| SQL Database | *.database.windows.net | *.database.cloudapi.de |
--
-## Azure Cache for Redis
-For details on Azure Cache for Redis and how to use it, see [Azure Cache for Redis global documentation](../azure-cache-for-redis/index.yml).
-
-### Variations
-The URLs for accessing and managing Azure Cache for Redis in Azure Germany are different from the URLs in global Azure:
-
-| Service type | Global Azure | Azure Germany |
-| | | |
-| Cache endpoint | *.redis.cache.windows.net | *.redis.cache.cloudapi.de |
-| Azure portal | https://portal.azure.com | https://portal.microsoftazure.de |
-
-> [!NOTE]
-> All your scripts and code need to account for the appropriate endpoints and environments. For more information, see "To connect to Microsoft Azure Germany" in [Manage Azure Cache for Redis with Azure PowerShell](../azure-cache-for-redis/cache-how-to-manage-redis-cache-powershell.md).
->
->
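-
-For instance, a short sketch of targeting the Azure Germany environment from PowerShell before you manage a cache (the cache and resource group names are hypothetical):
-
-```powershell
-# Sign in against the Azure Germany environment instead of global Azure.
-Connect-AzAccount -Environment AzureGermanCloud
-
-# Hypothetical names; the cache hostname ends in .redis.cache.cloudapi.de.
-Get-AzRedisCache -ResourceGroupName "myResourceGroup" -Name "mycache"
-```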
--
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services Iot https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-iot.md
- Title: Azure Germany IoT services | Microsoft Docs
-description: This article provides a starting point for Azure IoT Suite for Azure Germany.
- Previously updated : 10/16/2020------
-# Azure Germany IoT services
--
-## IoT solution accelerators
-All the required services for Azure IoT Suite are available in Azure Germany.
-
-### Variations
-The home page for Azure IoT Suite in Azure Germany is different from the page in global Azure.
-
-## Solution accelerators
-You might want to start with one of the following solution accelerators.
-
-### Remote Monitoring
-The Remote Monitoring solution accelerator is an implementation of an end-to-end monitoring solution for multiple machines running in remote locations. The solution combines key Azure services to provide a generic implementation of the business scenario. You can use the solution as a starting point for your own implementation and customize it to meet your specific business requirements.
-
-### Predictive Maintenance
-The Predictive Maintenance solution accelerator is an end-to-end solution for a business scenario that predicts the point at which a failure is likely to occur. You can use this solution proactively for activities such as optimizing maintenance. The solution combines key Azure IoT Suite services, such as Azure IoT Hub, Stream Analytics, and a Machine Learning workspace. This workspace contains a model, based on a public sample data set, to predict the Remaining Useful Life (RUL) of an aircraft engine. The solution fully implements the IoT business scenario as a starting point for you to plan and implement a solution that meets your specific business requirements.
--
-## Deploying the solution accelerator
-
-Both solutions can be deployed in two ways: via the website or via PowerShell.
-
-### Deploy via website
-
-Follow the instructions in the [tutorial for the preconfigured solutions](/previous-versions/azure/iot-accelerators/about-iot-accelerators) by using the home page mentioned earlier.
-
-### Deploy via PowerShell
-
-There's a full version (using Azure Resource Manager templates and Visual Studio) of the *remote monitoring* solution. Download it from the [Azure-IoT-Remote-Monitoring repository on GitHub](https://github.com/Azure/azure-iot-remote-monitoring). The PowerShell deployment is ready for other environments like Azure Germany. Provide the value "AzureGermanCloud" for the *Environment* parameter, so the command looks similar to this:
-
-```powershell
-build.cmd cloud debug AzureGermanCloud
-```
-
-Bing Maps is currently not available in Azure Germany and therefore cannot be subscribed to automatically. You can solve this problem by subscribing to the service in global Azure and using the service there.
-
-> [!NOTE]
-> When you use Bing Maps the way it's described here, you leave the Azure Germany environment.
-
-Here's how to do it:
-
-1. Create a Bing Maps API in the global Azure portal by clicking **+ New**, searching for **Bing Maps API for Enterprise**, and following the prompts.
-2. Get your Bing Maps API for Enterprise key from the global Azure portal:
- 1. Browse to the resource group where your Bing Maps API for Enterprise is in the global Azure portal.
- 2. Click **All Settings** > **Key Management**.
- 3. You see two keys: MasterKey and QueryKey. Copy the value for QueryKey.
-3. Pull down the latest code from the [Azure-IoT-Remote-Monitoring repository on GitHub](https://github.com/Azure/azure-iot-remote-monitoring).
-4. Run a cloud deployment in your environment by following the command-line deployment guidance in the `/docs/` repository folder.
-5. After you've run the deployment, look in your root folder for the **.user.config** file created during deployment. Open this file in a text editor.
-6. Change the following line to include the value that you copied for QueryKey: `<setting name="MapApiQueryKey" value="" />`
-7. Redeploy the solution by repeating step 4.
-
--
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services Networking https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-networking.md
- Title: Azure Germany networking services | Microsoft Docs
-description: Provides a comparison of features and guidance for private connectivity to Azure Germany
- Previously updated : 10/16/2020------
-# Azure Germany networking services
--
-## ExpressRoute (private connectivity)
-Azure ExpressRoute is generally available in Azure Germany. For more information (including partners and peering locations), see the [ExpressRoute global documentation](../expressroute/index.yml).
-
-### Variations
-
-* Azure Germany customers connect to a physically isolated capacity over a dedicated Azure Germany ExpressRoute connection.
-* Azure Germany provides increased availability and durability by using multiple region pairs located a minimum of 400 km apart.
-* By default, all Azure Germany ExpressRoute connectivity is configured as active-active redundant with support for bursting and delivers up to 10G circuit capacity.
-* Azure Germany ExpressRoute locations provide optimized pathways (including shortest hops, low latency, and high performance) for customers and Azure Germany geo-redundant regions.
-* The Azure Germany ExpressRoute private connection does not use, traverse, or depend on the Internet.
-* The Azure Germany physical and logical infrastructure is physically dedicated and separated from the international Microsoft cloud network.
-* Azure Germany ExpressRoute provides private connectivity to Microsoft Azure cloud services, but not to Microsoft 365 or Dynamics 365 cloud services.
-
-### Considerations
-Two basic services provide private network connectivity to Azure Germany: ExpressRoute and VPN (site-to-site for a typical organization).
-
-You can use ExpressRoute to create private connections between Azure Germany datacenters and your on-premises infrastructure, or in a colocation environment. ExpressRoute connections do not go over the public Internet. They offer more reliability, faster speeds, and lower latencies than typical Internet connections. In some cases, using ExpressRoute connections to transfer data between on-premises systems and Azure yields significant cost benefits.
-
-With ExpressRoute, you establish connections to Azure at an ExpressRoute location, such as an ExpressRoute Exchange provider facility. Or you directly connect to Azure from your existing WAN, such as a multiprotocol label switching (MPLS) VPN that's supplied by a network service provider.
-
-For network services to support Azure Germany customer applications and solutions, we strongly recommend that you implement ExpressRoute (private connectivity) to connect to Azure Germany. If you use VPN connections, consider the following:
-
-* Contact your authorizing official/agency to determine whether you need private connectivity or another secure connection mechanism, and to identify any additional restrictions.
-* Decide whether to mandate that the site-to-site VPN is routed through a private connectivity zone.
-* Obtain either an MPLS circuit or a VPN with a licensed private connectivity access provider.
-
-If you use a private connectivity architecture, validate that an appropriate implementation is established and maintained for the connection to the Gateway Network/Internet (GN/I) edge router demarcation point for Azure Germany. Similarly, your organization must establish network connectivity between your on-premises environment and Gateway Network/Customer (GN/C) edge router demarcation point for Azure Germany.
-
-If you are connecting to Microsoft through ExpressRoute at any one peering location in the Azure Germany region, you will have access to all Microsoft Azure cloud services across all regions within the German boundary. For example, if you connect to Microsoft in Berlin through ExpressRoute, you will have access to all Microsoft cloud services hosted in Azure Germany.
-
-For details on locations and partners, and a detailed list of ExpressRoute for Azure Germany peering locations, see the **Overview** tab in the [ExpressRoute global documentation](../expressroute/index.yml).
-
-You can purchase more than one ExpressRoute circuit. Having multiple connections offers you significant benefits on high availability due to geo-redundancy. In cases where you have multiple ExpressRoute circuits, you will receive the same set of prefixes advertised from Microsoft on the public peering paths. This means you will have multiple paths from your network to Microsoft. This situation can potentially cause suboptimal routing decisions to be made in your network. As a result, you might experience suboptimal connectivity experiences to different services. For more information, see the **How-to guides > Best practices** tab in the [ExpressRoute global documentation](../expressroute/index.yml) and select **Optimize routing**.
-
-## Support for Load Balancer
-Azure Load Balancer is generally available in Azure Germany. For more information, see the [Load Balancer global documentation](../load-balancer/load-balancer-overview.md).
-
-## Support for Traffic Manager
-Azure Traffic Manager is generally available in Azure Germany. For more information, see the [Traffic Manager global documentation](../traffic-manager/traffic-manager-overview.md).
-
-## Support for virtual network peering
-Virtual network peering is generally available in Azure Germany. For more information, see the [Virtual network peering global documentation](../virtual-network/virtual-network-peering-overview.md).
-
-## Support for VPN Gateway
-Azure VPN Gateway is generally available in Azure Germany. For more information, see the [VPN Gateway global documentation](../vpn-gateway/vpn-gateway-about-vpngateways.md).
-
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services Securityandidentity https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-securityandidentity.md
- Title: Azure Germany security and identity services | Microsoft Docs
-description: This article provides a comparison of security and identity services for Azure Germany.
- Previously updated : 10/16/2020------
-# Azure Germany security and identity services
--
-## Key Vault
-For details on the Azure Key Vault service and how to use it, see the [Key Vault global documentation](../key-vault/index.yml).
-
-Key Vault is generally available in Azure Germany. As in global Azure, there is no extension, so Key Vault is available through PowerShell and CLI only.
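-
-As a brief, illustrative sketch (the vault and resource group names are made up), creating and querying a vault with PowerShell works the same way as in global Azure once you sign in to the Azure Germany environment:
-
-```powershell
-Connect-AzAccount -Environment AzureGermanCloud
-
-# Hypothetical names; germanycentral is one of the two Azure Germany regions.
-New-AzKeyVault -Name "my-germany-vault" -ResourceGroupName "myResourceGroup" -Location "germanycentral"
-Get-AzKeyVault -VaultName "my-germany-vault"
-```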
-
-## Azure Active Directory
-Azure Active Directory offers identity and access capabilities for information systems running in Microsoft Azure. By using directory services, security groups, and group policy, you can help control the access and security policies of the machines that use Azure Active Directory. You can use accounts and security groups, along with Azure role-based access control (Azure RBAC), to help manage access to the information systems.
-
-Azure Active Directory is generally available in Azure Germany.
-
-### Variations
-
-* Azure Active Directory in Azure Germany is completely separated from Azure Active Directory in global Azure.
-* Customers cannot use a Microsoft account to sign in to Azure Germany.
-* The login suffix for Azure Germany is *onmicrosoft.de* (not *onmicrosoft.com* like in global Azure).
-* Customers need a separate subscription to work in Azure Germany.
-* Customers in Azure Germany cannot access resources that require a subscription or identity in global Azure.
-* Customers in global Azure cannot access resources that require a subscription or identity in Azure Germany.
-* Additional domains can be added/verified only in one of the cloud environments.
-
-> [!NOTE]
-> Assigning rights to users from other tenants with *both tenants inside Azure Germany* is not yet available.
--
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services Storage https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-storage.md
- Title: Azure Germany storage services | Microsoft Docs
-description: This topic provides a comparison of storage services for Azure Germany. You will also find other relevant information.
- Previously updated : 10/16/2020------
-# Azure Germany storage services
--
-## Storage
-For details on Azure Storage and how to use it, see the [Storage global documentation](../storage/index.yml).
-
-Data stored in Azure Storage is replicated to ensure high availability. For geo-redundant storage and read-access geo-redundant storage, Azure replicates data between *pairing regions*. For Azure Germany, these pairing regions are:
-
-| Primary region | Secondary (pairing) region |
-| | |
-| Germany Central | Germany Northeast |
-| Germany Northeast | Germany Central |
-
-Replication of data keeps the data within German borders. Primary and secondary regions are paired to provide the necessary distance between datacenters so that availability is maintained in the event of an area-wide outage or disaster. For geo-redundant, high-availability storage, select either geo-redundant storage or read-access geo-redundant storage when you're creating a storage account.
-
-Storage Service Encryption safeguards data at rest within Azure storage accounts. When you enable that feature, Azure automatically encrypts data before persisting to storage. Data is encrypted through 256-bit AES encryption. Storage Service Encryption supports encryption of block blobs, append blobs, and page blobs.
-
-### Storage service availability by Azure Germany region
-
-| Service | Germany Central | Germany Northeast |
-| | | |
-| [Blob storage](../storage/common/storage-introduction.md#blob-storage) |GA |GA |
-| [Azure Files](../storage/common/storage-introduction.md#azure-files) | GA | GA |
-| [Table storage](../storage/common/storage-introduction.md#table-storage) |GA |GA |
-| [Queue storage](../storage/common/storage-introduction.md#queue-storage) |GA | GA |
-| [Hot/cool blob storage](../storage/blobs/access-tiers-overview.md) |GA |GA |
-| [Storage Service Encryption](../storage/common/storage-service-encryption.md) |GA |GA |
-| Import/Export |NA |NA |
-| StorSimple |NA |NA |
-
-### Variations
-The URLs for storage accounts in Azure Germany are different from those in global Azure:
-
-| Service type | Global Azure | Azure Germany |
-| | | |
-| Blob storage | *.blob.core.windows.net | *.blob.core.cloudapi.de |
-| Azure Files | *.file.core.windows.net | *.file.core.cloudapi.de |
-| Queue storage | *.queue.core.windows.net | *.queue.core.cloudapi.de |
-| Table storage | *.table.core.windows.net | *.table.core.cloudapi.de |
-
-> [!NOTE]
-> All your scripts and code need to account for the appropriate endpoints. For more information, see [Configure Azure Storage connection strings](../storage/common/storage-configure-connection-string.md).
->
->
-
-For more information on APIs, see [Cloud Storage Account Constructor](/dotnet/api/microsoft.azure.cosmos.table.cloudstorageaccount.-ctor).
-
-The endpoint suffix to use in these overloads is *core.cloudapi.de*.
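-
-The same suffix applies elsewhere. For example, a small sketch (the account name and key are placeholders) that points the Az.Storage cmdlets at the Azure Germany endpoints:
-
-```powershell
-# -Endpoint sets the endpoint suffix; the default would be core.windows.net.
-$ctx = New-AzStorageContext -StorageAccountName "mystorageaccount" -StorageAccountKey "<account key>" -Endpoint "core.cloudapi.de"
-
-# List containers in the Azure Germany storage account through that context.
-Get-AzStorageContainer -Context $ctx
-```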
-
-> [!NOTE]
-> If error 53 ("The network path was not found") is returned while you're [mounting the file share](../storage/files/storage-dotnet-how-to-use-files.md), a firewall might be blocking the outbound port. Try mounting the file share on a virtual machine that's in the same Azure subscription as the storage account.
->
->
--
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services Webandmobile https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services-webandmobile.md
- Title: Azure Germany web and mobile services | Microsoft Docs
-description: Provides a comparison of features and guidance on developing applications for Azure Germany
- Previously updated : 10/16/2020------
-# Azure Germany web and mobile services
--
-## App Service
-Azure App Service is generally available in Azure Germany.
-### Variations
-The addresses for Azure App Service apps created in Azure Germany are different from those created for global Azure:
-
-| Service type | Global Azure | Azure Germany |
-| | | |
-| App Service |*.azurewebsites.net |*.azurewebsites.de|
---
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Services https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-services.md
- Title: Azure Germany available services | Microsoft Docs
-description: This article provides an overview of the available services in Azure Germany.
- Previously updated : 10/16/2020------
-# Available services in Azure Germany
--
-Azure Germany is no longer expanding its services. This article documents the services that are currently available in Azure Germany.
-
->[!NOTE]
-> For the most up-to-date list of services, see [Products by region](https://azure.microsoft.com/regions/services/).
->
->
-
-In the following tables, services specified as Azure Resource Manager enabled have resource providers and can be managed through PowerShell. For detailed information on Resource Manager providers, API versions, and schemas, see [Resource Manager supported services](../azure-resource-manager/management/resource-providers-and-types.md). Services specified as available in the portal can be managed in the [Azure Germany portal](https://portal.microsoftazure.de/).
-
-## [Compute](./germany-services-compute.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| [Virtual Machines](./germany-services-compute.md#virtual-machines) | Yes | Yes |
-| Virtual Machine Scale Sets | Yes | Yes |
-| Service Fabric | Yes | Yes |
--
-## [Networking](./germany-services-networking.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| [ExpressRoute](./germany-services-networking.md#expressroute-private-connectivity) | Yes | Yes |
-| Virtual Network | Yes | Yes |
-| [Load Balancer](./germany-services-networking.md#support-for-load-balancer) | Yes | Yes |
-| [Traffic Manager](./germany-services-networking.md#support-for-traffic-manager) | Yes | Yes |
-| [VPN Gateway](./germany-services-networking.md#support-for-vpn-gateway) | Yes | Yes |
-| Application Gateway | Yes | Yes |
---
-## [Storage](./germany-services-storage.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| [Storage](./germany-services-storage.md#storage) | Yes | Yes |
-| StorSimple | No | No |
-| Backup | Yes | Yes |
-| Site Recovery | Yes | Yes |
---
-## [Web and mobile](./germany-services-webandmobile.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| [App Service: Web Apps](./germany-services-webandmobile.md#app-service) | Yes | Yes |
-| [App Service: API Apps](./germany-services-webandmobile.md#app-service) | Yes | Yes |
-| [App Service: Mobile Apps](./germany-services-webandmobile.md#app-service) | Yes | Yes |
-| Media Services | Yes | Yes |
--
-## [Databases](./germany-services-database.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| [SQL Database](./germany-services-database.md#sql-database) | Yes | Yes |
-| Azure Synapse Analytics | Yes | Yes |
-| SQL Server Stretch Database | Yes | Yes |
-| [Azure Cache for Redis](./germany-services-database.md#azure-cache-for-redis) | Yes | Yes |
-| Azure Cosmos DB | Yes | Yes |
--
-## Intelligence and analytics
-
-| Service | Resource Manager | Portal |
-| | | |
-| HDInsight | Yes | Yes |
-| Machine Learning | Yes | No |
--
-## [Internet of Things (IoT)](./germany-services-iot.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| Event Hubs | Yes | Yes |
-| IoT Hub | Yes | Yes |
-| Notification Hubs | Yes | No |
-| Stream Analytics | Yes | Yes |
--
-## Enterprise integration
-
-| Service | Resource Manager | Portal |
-| | | |
-| Service Bus | Yes | Yes |
-| StorSimple | No | No |
-| SQL Server Stretch Database | Yes | Yes |
---
-## [Security and identity](./germany-services-securityandidentity.md)
-
-| Service | Resource Manager | Portal |
-| | | |
-| Active Directory Free | Yes | Yes |
-| Active Directory Premium | No | No |
-| [Key Vault](./germany-services-securityandidentity.md#key-vault) | Yes | No |
---
-## Monitoring and management
-
-| Service | Resource Manager | Portal |
-| | | |
-| Automation | No | No |
-| Backup | Yes | Yes |
-| Azure Monitor logs | No | No |
-| Site Recovery | Yes | Yes |
---
-## Next steps
-For supplemental information and updates, subscribe to the [Microsoft Azure Germany blog](/archive/blogs/azuregermany/).
germany Germany Welcome https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/germany/germany-welcome.md
- Title: Azure Germany overview | Microsoft Docs
-description: This article provides an overview of the Azure Germany cloud capabilities and the trustworthy design and security that support compliance requirements for German data privacy regulations
- Previously updated : 10/16/2020------
-# Welcome to Azure Germany
--
-## Overview
-Microsoft Azure Germany delivers a cloud platform built on the [foundational principles of security, privacy, compliance, and transparency](https://azure.microsoft.com/overview/clouds/germany/). Azure Germany is a physically isolated instance of Microsoft Azure. It uses world-class security and [compliance services](https://azure.microsoft.com/support/trust-center/compliance/) that are critical to German data privacy regulations for all systems and applications built on its architecture. Operated by a data trustee, Azure Germany supports multiple hybrid scenarios for building and deploying solutions on-premises or in the cloud. You can also take advantage of the instant scalability and guaranteed uptime of a hyperscale cloud service.
-
-It provides data residency in Germany for data in transit and at rest, and data replication across German datacenters for business continuity. Customer data in the two datacenters is managed under the control of a data trustee, T-Systems International. This trustee is an independent German company and a subsidiary of Deutsche Telekom. It provides additional controls for customers' data, because access is provided only with the permission of customers or the data trustee.
-
-Microsoft commercial cloud services in these datacenters adhere to German data-handling regulations and give customers additional choices for how and where data is processed.
-
-Azure Germany includes the core components of infrastructure as a service (IaaS), platform as a service (PaaS), and software as a service (SaaS). These components include infrastructure, network, storage, data management, identity management, and many other services.
-
-Azure Germany supports most of the same great features that global Azure customers have used, like geosynchronous data replication and autoscaling.
-
-## Azure Germany documentation
-This site describes the capabilities of [Microsoft Azure Germany](https://azure.microsoft.com/overview/clouds/germany/) services, and it provides general guidance that applies to all customers. Before you include specifically regulated data in your Azure Germany subscription, you should familiarize yourself with the Azure Germany capabilities.
-
-For current information on the Azure Germany services covered under specific accreditations and regulations, see the [Microsoft Azure Trust Center Compliance page](https://www.microsoft.com/TrustCenter/Compliance/default.aspx). Additional Microsoft services might be available, but they are not within the scope of the Azure Germany covered services or this documentation. Azure Germany services might also permit you to use various additional resources, applications, or services that are provided by third parties--or by Microsoft under separate terms of use and privacy policies. These are also not included in the scope of this documentation. You are responsible for reviewing the terms of all such "add-on" offerings, such as Azure Marketplace offerings, to ensure that they meet your needs for compliance.
-
-Azure Germany is available to eligible customers and partners globally who intend to do business in the EU/EFTA, including the United Kingdom.
-
-## General guidance for customers
-Most of the technical content that's currently available assumes that applications are being developed for global Azure rather than for Azure Germany. It's important for you to ensure that developers are aware of key differences for applications developed to be hosted in Azure Germany:
-
-* Certain services and features that are in specific regions of global Azure might not be available in Azure Germany. For the most up-to-date services that are generally available, see the [regions page](https://azure.microsoft.com/regions/services).
-* For features that are offered in Azure Germany, there are configuration differences from global Azure. You should review your sample code, configurations, and steps to ensure that you are building and executing within the Azure Germany environment.
-* For information that identifies the Azure Germany boundary, and for customer-regulated/controlled data guidance and best practices, refer to the Azure Germany technical services documentation on this site.
-
-## Next steps
-For supplemental information and updates, subscribe to the
-[Azure Germany blog](/archive/blogs/azuregermany/).
-
-If you're interested in learning more about Azure Germany, use the following links:
-
-* [Sign up for a trial](https://azure.microsoft.com/free/germany/)
-* [Sign in](https://portal.microsoftazure.de/) (if you already have an Azure Germany account)
-* [Acquiring and accessing Azure Germany](https://azure.microsoft.com/overview/clouds/germany/)
governance Create Management Group Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/governance/management-groups/create-management-group-go.md
To enable Go to manage management groups, the package must be added. This packag
can be used, including [bash on Windows 10](/windows/wsl/install-win10) or locally installed. 1. Check that the latest Go is installed (at least **1.15**). If it isn't yet installed, download it
- at [Golang.org](https://golang.org/dl/).
+ at [Golang.org](https://go.dev/dl/).
1. Check that the latest Azure CLI is installed (at least **2.5.1**). If it isn't yet installed, see [Install the Azure CLI](/cli/azure/install-azure-cli).
governance Policy Devops Pipelines https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/governance/policy/tutorials/policy-devops-pipelines.md
+
+ Title: "Tutorial: Implement Azure Policy with Azure DevOps"
+description: In this tutorial, you implement an Azure Policy with an Azure DevOps release pipeline.
Last updated : 03/24/2022+++++
+# Implement Azure Policy with Azure DevOps release pipelines
+
+**Azure DevOps Services**
+
+Learn how to enforce compliance policies on your Azure resources before and after deployment with Azure Pipelines. Azure Pipelines lets you build, test, and deploy with continuous integration (CI) and continuous delivery (CD) using [Azure DevOps](/azure/devops/). One scenario for adding Azure Policy to a pipeline is when you want to ensure that resources are deployed only to authorized regions and are configured to send diagnostics logs to Azure Log Analytics.
+
+You'll need a [classic pipeline](/azure/devops/pipelines/release/define-multistage-release-process) to take advantage of Azure Policy.
+
+For more information, see [What is Azure Pipelines?](/azure/devops/pipelines/get-started/what-is-azure-pipelines)
+and [Create your first pipeline](/azure/devops/pipelines/create-first-pipeline).
+## Prepare
+
+1. Create an [Azure Policy](/azure/governance/policy/tutorials/create-and-manage) in the Azure portal.
+ There are several [predefined sample policies](/azure/governance/policy/samples/)
+ that can be applied to a management group, subscription, and resource group.
+
+1. In Azure DevOps, create a release pipeline that contains at least one stage, or open an existing release pipeline.
+
+1. Add a pre- or post-deployment condition that includes the **Security and compliance assessment** task as a gate.
+ [More details](/azure/devops/pipelines/release/deploy-using-approvals#configure-gate).
+
+ ![Screenshot of Azure Policy Gate.](../media/devops-policy/azure-policy-gate.png)
+
+## Validate for violations during a release
+
+> [!NOTE]
+> Use the [AzurePolicyCheckGate](/azure/devops/pipelines/tasks/deploy/azure-policy) task to check for policy compliance in YAML. This task can only be used as a gate and not in a build or a release pipeline.
+
+1. Navigate to your team project in Azure DevOps.
+
+1. In the **Pipelines** section, open the **Releases** page and create a new release.
+
+1. Choose the **In progress** link in the release view to open the live logs page.
+
+1. When the release is in progress and attempts to perform an action disallowed by
+ the defined policy, the deployment is marked as **Failed**. The error message contains a link to view the policy violations.
+
+ ![Screenshot of Azure Policy failure message.](../media/devops-policy/azure-policy-02.png)
+
+1. An error message is written to the logs and displayed in the stage status panel in the releases page of Azure Pipelines.
+
+ ![Screenshot of Azure Policy failure in log.](../media/devops-policy/azure-policy-03.png)
+
+1. When the release passes the policy compliance gate, a **Succeeded** status is displayed.
+
+ ![Screenshot of Policy Gates.](../media/devops-policy/policy-compliance-gates.png)
+
+1. Choose the successful deployment to view the detailed logs.
+
+ ![Screenshot of Policy Logs.](../media/devops-policy/policy-logs.png)
+
+## Next steps
+
+To learn more about the structure of policy definitions, see this article:
+
+> [!div class="nextstepaction"]
+> [Azure Policy definition structure](../concepts/definition-structure.md)
governance First Query Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/governance/resource-graph/first-query-go.md
To enable Go to query Azure Resource Graph, the package must be added. This pack
Go can be used, including [bash on Windows 10](/windows/wsl/install-win10) or locally installed. 1. Check that the latest Go is installed (at least **1.14**). If it isn't yet installed, download it
- at [Golang.org](https://golang.org/dl/).
+ at [Golang.org](https://go.dev/dl/).
1. Check that the latest Azure CLI is installed (at least **2.5.1**). If it isn't yet installed, see [Install the Azure CLI](/cli/azure/install-azure-cli).
hdinsight Hdinsight Go Sdk Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/hdinsight/hdinsight-go-sdk-overview.md
If you don't have an Azure subscription, create a [free account](https://azure
## Prerequisites * A [`go get` tool](https://github.com/golang/go/wiki/GoGetTools).
-* [Go](https://golang.org/dl/).
+* [Go](https://go.dev/dl/).
## SDK installation
healthcare-apis Disaster Recovery https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/healthcare-apis/azure-api-for-fhir/disaster-recovery.md
Consider the following steps for a DR test.
The disaster recovery feature incurs extra costs because of the compute and data replicas running in the secondary region. For more pricing details, refer to the [Azure API for FHIR pricing](https://azure.microsoft.com/pricing/details/azure-api-for-fhir) web page. > [!NOTE]
-> The DR offering is subject to the [SLA for Azure API for FHIR](https://azure.microsoft.com/support/legal/sla/azure-api-for-fhir/v1_0/), 1.0.
+> The DR offering is subject to the [SLA for Azure API for FHIR](https://azure.microsoft.com/pricing/details/health-data-services), 1.0.
## Next steps
The disaster recovery feature incurs extra costs because data of the compute and
In this article, you've learned how DR for Azure API for FHIR works and how to enable it. To learn about Azure API for FHIR's other supported features, see: >[!div class="nextstepaction"]
->[FHIR supported features](fhir-features-supported.md)
+>[FHIR supported features](fhir-features-supported.md)
healthcare-apis Get Started With Dicom https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/healthcare-apis/dicom/get-started-with-dicom.md
You can find more details on DICOMweb standard APIs and change feed in the [DICO
#### DICOMcast
-DICOMcast is currently available as an [open source](https://github.com/microsoft/dicom-server/blob/main/docs/concepts/dicom-cast.md) project, and it's under private preview as a managed service. To enable DICOMcast as a managed service for your Azure subscription, request access by creating an [Azure support ticket](https://azure.microsoft.com/support/create-ticket/following) by following the guidance in the article [DICOMcast access request](dicom-cast-access-request.md).
+DICOMcast is currently available as an [open source](https://github.com/microsoft/dicom-server/blob/main/docs/concepts/dicom-cast.md) project, and it's under private preview as a managed service. To enable DICOMcast as a managed service for your Azure subscription, request access by creating an [Azure support ticket](https://azure.microsoft.com/support/create-ticket/) and following the guidance in the article [DICOMcast access request](dicom-cast-access-request.md).
## Next steps
iot-dps Iot Dps Ha Dr https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-dps/iot-dps-ha-dr.md
DPS also supports [Availability Zones](../availability-zones/az-overview.md). An
* Central US * East US * East US 2
+* France Central
* Japan East * North Europe * UK South
iot-hub Iot Hub Devguide Identity Registry https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-devguide-identity-registry.md
The IoT Hub identity registry contains a field called **connectionState**. Only
If your IoT solution needs to know if a device is connected, you can implement the *heartbeat pattern*. In the heartbeat pattern, the device sends device-to-cloud messages at least once every fixed amount of time (for example, at least once every hour). Therefore, even if a device does not have any data to send, it still sends an empty device-to-cloud message (usually with a property that identifies it as a heartbeat). On the service side, the solution maintains a map with the last heartbeat received for each device. If the solution does not receive a heartbeat message within the expected time from the device, it assumes that there is a problem with the device.
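To make the heartbeat pattern concrete, here's a minimal device-side sketch using the Azure IoT Hub device SDK for Python (`azure-iot-device`); the connection-string environment variable, the interval, and the `message-type` property name are illustrative assumptions, not values taken from the article.

```python
import os
import time

from azure.iot.device import IoTHubDeviceClient, Message  # pip install azure-iot-device

# Placeholder: device connection string supplied through the environment.
conn_str = os.environ["IOTHUB_DEVICE_CONNECTION_STRING"]
HEARTBEAT_INTERVAL_SECONDS = 3600  # "at least once every hour" per the pattern

client = IoTHubDeviceClient.create_from_connection_string(conn_str)
client.connect()

try:
    while True:
        # An empty device-to-cloud message, tagged so the service side can
        # recognize it as a heartbeat rather than telemetry.
        heartbeat = Message("")
        heartbeat.custom_properties["message-type"] = "heartbeat"
        client.send_message(heartbeat)
        time.sleep(HEARTBEAT_INTERVAL_SECONDS)
finally:
    client.shutdown()
```

On the service side, the map the article mentions can be as simple as a dictionary of device ID to last-heartbeat timestamp; any device whose entry is older than the agreed interval is treated as potentially unhealthy.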
-A more complex implementation could include the information from [Azure Monitor](../azure-monitor/index.yml) and [Azure Resource Health](../service-health/resource-health-overview.md) to identify devices that are trying to connect or communicate but failing. To learn more, see [Monitor IoT Hub](monitor-iot-hub.md) and [Check IoT Hub resource health](iot-hub-azure-service-health-integration.md#check-health-of-an-iot-hub-with-azure-resource-health). When you implement the heartbeat pattern, make sure to check [IoT Hub Quotas and Throttles](iot-hub-devguide-quotas-throttling.md).
+A more complex implementation could include the information from [Azure Monitor](../azure-monitor/index.yml) and [Azure Resource Health](../service-health/resource-health-overview.md) to identify devices that are trying to connect or communicate but failing. To learn more about using these services with IoT Hub, see [Monitor IoT Hub](monitor-iot-hub.md) and [Check IoT Hub resource health](iot-hub-azure-service-health-integration.md#check-health-of-an-iot-hub-with-azure-resource-health). For more specific information about using Azure Monitor or Event Grid to monitor device connectivity, see [Monitor, diagnose, and troubleshoot device connectivity](iot-hub-troubleshoot-connectivity.md). When you implement the heartbeat pattern, make sure to check [IoT Hub Quotas and Throttles](iot-hub-devguide-quotas-throttling.md).
> [!NOTE] > If an IoT solution uses the connection state solely to determine whether to send cloud-to-device messages, and messages are not broadcast to large sets of devices, consider using the simpler *short expiry time* pattern. This pattern achieves the same result as maintaining a device connection state registry using the heartbeat pattern, while being more efficient. If you request message acknowledgements, IoT Hub can notify you about which devices are able to receive messages and which are not.
iot-hub Iot Hub Device Management Visual Studio https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-device-management-visual-studio.md
- Title: Azure IoT device management w/ Visual Studio Cloud Explorer
-description: Use the Cloud Explorer for Visual Studio for Azure IoT Hub device management, featuring the Direct methods and the Twin's desired properties management options.
---- Previously updated : 08/20/2019----
-# Use Cloud Explorer for Visual Studio for Azure IoT Hub device management
-
-![End-to-end diagram](media/iot-hub-device-management-visual-studio/iot-e2e-simple.png)
-
-In this article, you learn how to use the Cloud Explorer for Visual Studio with various management options on your development computer. [Cloud Explorer](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.CloudExplorerForVS) is a useful Visual Studio extension that enables you to view your Azure resources, inspect their properties and perform key developer actions from within Visual Studio. It comes with management options that you can use to perform various tasks.
--
-| Management option | Task |
-|-|--|
-| Direct methods | Make a device act such as starting or stopping sending messages or rebooting the device. |
-| Read device twin | Get the reported state of a device. For example, the device reports the LED is blinking now. |
-| Update device twin | Put a device into certain states, such as setting an LED to green or setting the telemetry send interval to 30 minutes. |
-| Cloud-to-device messages | Send notifications to a device. For example, "It is very likely to rain today. Don't forget to bring an umbrella." |
-
-For more detailed explanation on the differences and guidance on using these options, see [Device-to-cloud communication guidance](iot-hub-devguide-d2c-guidance.md) and [Cloud-to-device communication guidance](iot-hub-devguide-c2d-guidance.md).
-
-Device twins are JSON documents that store device state information, including metadata, configurations, and conditions. IoT Hub persists a device twin for each device that connects to it. For more information about device twins, see [Get started with device twins](iot-hub-node-node-twin-getstarted.md).
-
-## Prerequisites
--- An active Azure subscription.--- An Azure IoT Hub under your subscription.--- Microsoft Visual Studio 2017 Update 9 or later. This article uses [Visual Studio 2017 or Visual Studio 2019](https://www.visualstudio.com/vs/).--- Cloud Explorer component from Visual Studio Installer, which is selected by default with Azure Workload.-
-## Update Cloud Explorer to latest version
-
-The Cloud Explorer component from Visual Studio Installer for Visual Studio 2017 only supports monitoring device-to-cloud and cloud-to-device messages. To use Visual Studio 2017, download and install the latest [Cloud Explorer](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.CloudExplorerForVS).
-
-## Sign in to access your hub
-
-1. In Visual Studio, select **View** > **Cloud Explorer** to open Cloud Explorer.
-
-1. Select the Account Management icon to show your subscriptions.
-
- ![Account Management icon](media/iot-hub-visual-studio-cloud-device-messaging/account-management-icon.png)
-
-1. If you are signed in to Azure, your accounts appear. To sign into Azure for the first time, choose **Add an account**.
-
-1. Select the Azure subscriptions you want to use and choose **Apply**.
-
-1. Expand your subscription, then expand **IoT Hubs**. Under each hub, you can see your devices for that hub. Right-click one device to access the management options.
-
- ![Management options](media/iot-hub-device-management-visual-studio/management-options-vs2019.png)
-
-## Direct methods
-
-To use direct methods, do the following steps:
-
-1. Right-click your device and select **Invoke Device Direct Method**.
-
-1. Enter the method name and payload in **Invoke Direct Method**, and then select **OK**.
-
- Results appear in **Output**.
-
-## Update device twin
-
-To edit a device twin, do the following steps:
-
-1. Right-click your device and select **Edit Device Twin**.
-
- An **azure-iot-device-twin.json** file opens with the content of device twin.
-
-1. Make some edits of **tags** or **properties.desired** fields to the **azure-iot-device-twin.json** file.
-
-1. Press **Ctrl+S** to update the device twin.
-
- Results appear in **Output**.
-
-## Send cloud-to-device messages
-
-To send a message from your IoT Hub to your device, follow these steps:
-
-1. Right-click your device and select **Send C2D Message**.
-
-1. Enter the message in **Send C2D message** and select **OK**.
-
- Results appear in **Output**.
-
-## Next steps
-
-You've learned how to use Cloud Explorer for Visual Studio with various management options.
-
iot-hub Iot Hub Event Grid https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-event-grid.md
Applications that handle IoT Hub events should follow these suggested practices:
* [Compare the differences between routing IoT Hub events and messages](iot-hub-event-grid-routing-comparison.md) * [Learn how to use IoT telemetry events to implement IoT spatial analytics using Azure Maps](../azure-maps/tutorial-iot-hub-maps.md)+
+* [Learn more about how to use Event Grid and Azure Monitor to monitor, diagnose, and troubleshoot device connectivity to IoT Hub](iot-hub-troubleshoot-connectivity.md)
iot-hub Iot Hub How To Order Connection State Events https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-how-to-order-connection-state-events.md
To remove an Azure Cosmos DB account from the Azure portal, right-click the acco
* [Try the IoT Hub events tutorial](../event-grid/publish-iot-hub-events-to-logic-apps.md)
-* Learn about what else you can do with [Event Grid](../event-grid/overview.md)
+* Learn about what else you can do with [Event Grid](../event-grid/overview.md)
+
+* Learn how to use Event Grid and Azure Monitor to [Monitor, diagnose, and troubleshoot device connectivity to IoT Hub](iot-hub-troubleshoot-connectivity.md)
iot-hub Iot Hub Managed Identity https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-managed-identity.md
az resource show --resource-type Microsoft.Devices/IotHubs --name <iot-hub-resou
Managed identities can be used for egress connectivity from IoT Hub to other Azure services for [message routing](iot-hub-devguide-messages-d2c.md), [file upload](iot-hub-devguide-file-upload.md), and [bulk device import/export](iot-hub-bulk-identity-mgmt.md). You can choose which managed identity to use for each IoT Hub egress connectivity to customer-owned endpoints including storage accounts, event hubs, and service bus endpoints. > [!NOTE]
-> Use a system-assigned managed identity to access private resources.
+> Only a system-assigned managed identity gives IoT Hub access to private resources. If you want to use a user-assigned managed identity, public access must be enabled on those private resources to allow connectivity.
## Configure message routing with managed identities
iot-hub Iot Hub Mqtt Support https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-mqtt-support.md
If a device cannot use the device SDKs, it can still connect to the public devic
* For the **ClientId** field, use the **deviceId**.
-* For the **Username** field, use `{iothubhostname}/{device_id}/?api-version=2018-06-30`, where `{iothubhostname}` is the full CName of the IoT hub.
+* For the **Username** field, use `{iothubhostname}/{device_id}/?api-version=2021-04-12`, where `{iothubhostname}` is the full CName of the IoT hub.
For example, if the name of your IoT hub is **contoso.azure-devices.net** and if the name of your device is **MyDevice01**, the full **Username** field should contain:
- `contoso.azure-devices.net/MyDevice01/?api-version=2018-06-30`
+ `contoso.azure-devices.net/MyDevice01/?api-version=2021-04-12`
We strongly recommend including api-version in the field; otherwise, it could cause unexpected behavior.
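As a minimal sketch of these connection fields in use, the following Paho-based Python snippet builds the ClientId and Username as described above; the hub name, device ID, and SAS token are placeholders, and TLS setup is reduced to the essentials (the article's full sample passes an explicit root CA file).

```python
import ssl
import time

import paho.mqtt.client as mqtt  # pip install paho-mqtt

# Placeholder values -- substitute your own hub name, device ID, and SAS token.
iot_hub_name = "contoso"
device_id = "MyDevice01"
sas_token = "SharedAccessSignature sr=..."

# ClientId is the deviceId; Username is {iothubhostname}/{device_id}/?api-version=2021-04-12.
client = mqtt.Client(client_id=device_id, protocol=mqtt.MQTTv311)
client.username_pw_set(
    username=f"{iot_hub_name}.azure-devices.net/{device_id}/?api-version=2021-04-12",
    password=sas_token,
)

# Relies on the system certificate store instead of an explicit root CA file.
client.tls_set(cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2)

client.connect(f"{iot_hub_name}.azure-devices.net", port=8883)
client.loop_start()
client.publish(f"devices/{device_id}/messages/events/", payload="hello over MQTT")
time.sleep(5)  # give the network loop time to flush the message
client.loop_stop()
```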
Connecting to IoT Hub over MQTT using a module identity is similar to the device
* Set the client ID to `{device_id}/{module_id}`.
-* If authenticating with username and password, set the username to `<hubname>.azure-devices.net/{device_id}/{module_id}/?api-version=2018-06-30` and use the SAS token associated with the module identity as your password.
+* If authenticating with username and password, set the username to `<hubname>.azure-devices.net/{device_id}/{module_id}/?api-version=2021-04-12` and use the SAS token associated with the module identity as your password.
* Use `devices/{device_id}/modules/{module_id}/messages/events/` as topic for publishing telemetry.
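Assuming the same Paho-based setup as the device example above, only three values change for a module identity; the hub, device, and module names below are placeholders.

```python
iot_hub_name = "contoso"   # placeholder hub name
device_id = "MyDevice01"   # placeholder device ID
module_id = "MyModule01"   # placeholder module ID

# ClientId for a module identity is {device_id}/{module_id}.
client_id = f"{device_id}/{module_id}"

# Username adds the module segment and keeps the api-version query string.
username = f"{iot_hub_name}.azure-devices.net/{device_id}/{module_id}/?api-version=2021-04-12"

# Telemetry is published to the module-scoped topic.
telemetry_topic = f"devices/{device_id}/modules/{module_id}/messages/events/"
```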
client.on_disconnect = on_disconnect
client.on_publish = on_publish client.username_pw_set(username=iot_hub_name+".azure-devices.net/" +
- device_id + "/?api-version=2018-06-30", password=sas_token)
+ device_id + "/?api-version=2021-04-12", password=sas_token)
client.tls_set(ca_certs=path_to_root_cert, certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
To authenticate using a device certificate, update the code snippet above with t
# Set the username but not the password on your client client.username_pw_set(username=iot_hub_name+".azure-devices.net/" +
- device_id + "/?api-version=2018-06-30", password=None)
+ device_id + "/?api-version=2021-04-12", password=None)
# Set the certificate and key paths on your client cert_file = "<local path to your certificate file>"
iot-hub Iot Hub Troubleshoot Connectivity https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-troubleshoot-connectivity.md
Title: Monitor and troubleshoot disconnects with Azure IoT Hub
+ Title: Monitor and troubleshoot device connectivity to Azure IoT Hub
description: Learn to monitor and troubleshoot common errors with device connectivity for Azure IoT Hub
#Customer intent: As an operator for Azure IoT Hub, I need to know how to find out when devices are disconnecting unexpectedly and troubleshoot and resolve those issues right away.
-# Monitor, diagnose, and troubleshoot Azure IoT Hub disconnects
+# Monitor, diagnose, and troubleshoot Azure IoT Hub device connectivity
Connectivity issues for IoT devices can be difficult to troubleshoot because there are many possible points of failure. Application logic, physical networks, protocols, hardware, IoT Hub, and other cloud services can all cause problems. The ability to detect and pinpoint the source of an issue is critical. However, an IoT solution at scale could have thousands of devices, so it's not practical to check individual devices manually. IoT Hub integrates with two Azure services to help you:
Use the following problem resolution guides for help with the most common errors
* [500008 GenericTimeout](iot-hub-troubleshoot-error-500xxx-internal-errors.md)
+## Azure Monitor: Use logs to monitor connectivity for a specific device
+
+There may be situations when you want to use Azure Monitor to see connectivity errors and information for a specific device. To isolate connectivity events for a device, you can follow the same steps as in the preceding section, but enter the following query. Replace *test-device* with the name of your device.
+
+```kusto
+AzureDiagnostics
+| where ResourceProvider == "MICROSOFT.DEVICES" and ResourceType == "IOTHUBS"
+| where Category == "Connections"
+| extend DeviceId = tostring(parse_json(properties_s).deviceId)
+| where DeviceId == "test-device"
+```
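If you prefer to run this query programmatically rather than in the portal, a sketch along the following lines should work with the `azure-monitor-query` and `azure-identity` Python packages; the workspace ID is a placeholder, and the query text is the one shown above.

```python
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient  # pip install azure-monitor-query azure-identity

# Placeholder: the Log Analytics workspace that receives your hub's Connections logs.
WORKSPACE_ID = "<log-analytics-workspace-id>"

QUERY = """
AzureDiagnostics
| where ResourceProvider == "MICROSOFT.DEVICES" and ResourceType == "IOTHUBS"
| where Category == "Connections"
| extend DeviceId = tostring(parse_json(properties_s).deviceId)
| where DeviceId == "test-device"
"""

client = LogsQueryClient(DefaultAzureCredential())
response = client.query_workspace(WORKSPACE_ID, QUERY, timespan=timedelta(days=1))

for table in response.tables:
    for row in table.rows:
        print(row)
```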
+
+The query returns both error and informational events for your target device. The following example output shows an informational **deviceConnect** event:
++ ## MQTT device disconnect behavior with Azure IoT SDKs Azure IoT device SDKs disconnect from IoT Hub and then reconnect when they renew SAS tokens over the MQTT (and MQTT over WebSockets) protocol. In logs, this shows up as informational device disconnect and connect events sometimes accompanied by error events.
iot-hub Iot Hub Visual Studio Cloud Device Messaging https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/iot-hub-visual-studio-cloud-device-messaging.md
- Title: Use VS Cloud Explorer to manage Azure IoT Hub device messaging
-description: Learn how to use Cloud Explorer for Visual Studio to monitor device to cloud messages and send cloud to device messages in Azure IoT Hub.
---- Previously updated : 08/20/2019----
-# Use Cloud Explorer for Visual Studio to send and receive messages between your device and IoT Hub
-
-![End-to-end diagram](./media/iot-hub-visual-studio-cloud-device-messaging/e-to-e-diagram.png)
-
-In this article, you learn how to use Cloud Explorer for Visual Studio to monitor device-to-cloud messages and to send cloud-to-device messages. Device-to-cloud messages could be sensor data that your device collects and then sends to your IoT Hub. Cloud-to-device messages could be commands that your IoT Hub sends to your device. For example, blink an LED that is connected to your device.
-
-[Cloud Explorer](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.CloudExplorerForVS) is a useful Visual Studio extension that enables you to view your Azure resources, inspect their properties and perform key developer actions from within Visual Studio. This article focuses on how to use Cloud Explorer to send and receive messages between your device and your hub.
--
-## Prerequisites
--- An active Azure subscription.--- An Azure IoT Hub under your subscription.--- Microsoft Visual Studio 2017 Update 9 or later. This article uses [Visual Studio 2019](https://www.visualstudio.com/vs/).--- The Cloud Explorer component from Visual Studio Installer, which is selected by default with Azure Workload.-
-## Update Cloud Explorer to latest version
-
-The Cloud Explorer component from Visual Studio Installer for Visual Studio 2017 only supports monitoring device-to-cloud and cloud-to-device messages. To use Visual Studio 2017, download and install the latest [Cloud Explorer](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.CloudExplorerForVS).
-
-## Sign in to access your hub
-
-To access your hub, follow these steps:
-
-1. In Visual Studio, select **View** > **Cloud Explorer** to open Cloud Explorer.
-
-1. Select the Account Management icon to show your subscriptions.
-
- ![Account Management icon](media/iot-hub-visual-studio-cloud-device-messaging/account-management-icon.png)
-
-1. If you are signed in to Azure, your accounts appear. To sign into Azure for the first time, choose **Add an account**.
-
-1. Select the Azure subscriptions you want to use and choose **Apply**.
-
-1. Expand your subscription, then expand **IoT Hubs**. Under each hub, you can see your devices for that hub.
-
- ![Device List](media/iot-hub-visual-studio-cloud-device-messaging/hub-device-list.png)
-
-## Monitor device-to-cloud messages
-
-To monitor messages that are sent from your device to your IoT Hub, follow these steps:
-
-1. Right-click your IoT Hub or device and select **Start Monitoring D2C Message**.
-
- ![Start Monitoring D2C Message](media/iot-hub-visual-studio-cloud-device-messaging/start-monitoring-d2c-message-vs2019.png)
-
-1. The monitored messages appear under **Output**.
-
- ![Monitoring D2C Message Result](media/iot-hub-visual-studio-cloud-device-messaging/monitor-d2c-message-result-vs2019.png)
-
-1. To stop monitoring, right-click on any IoT Hub or device and select **Stop Monitoring D2C Message**.
-
-## Send cloud-to-device messages
-
-To send a message from your IoT Hub to your device, follow these steps:
-
-1. Right-click your device and select **Send C2D Message**.
-
-1. Enter the message in input box.
-
- ![Send C2D Message](media/iot-hub-visual-studio-cloud-device-messaging/send-c2d-message-test.png)
-
- Results appear under **Output**.
-
- ![Send C2D Message Result](media/iot-hub-visual-studio-cloud-device-messaging/send-c2d-message-result-vs2019.png)
-
-## Next steps
-
-You've learned how to monitor device-to-cloud messages and send cloud-to-device messages between your IoT device and Azure IoT Hub.
-
iot-hub Monitor Iot Hub Reference https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/monitor-iot-hub-reference.md
The connections category tracks device connect and disconnect events from an IoT
} ```
+For detailed information about using connections logs to monitor device connectivity, see [Monitor, diagnose, and troubleshoot device connectivity to Azure IoT Hub](iot-hub-troubleshoot-connectivity.md).
+ ### Device telemetry The device telemetry category tracks errors that occur at the IoT hub and are related to the telemetry pipeline. This category includes errors that occur when sending telemetry events (such as throttling) and receiving telemetry events (such as unauthorized reader). This category cannot catch errors caused by code running on the device itself.
iot-hub Monitor Iot Hub https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/iot-hub/monitor-iot-hub.md
Following are queries that you can use to help you monitor your IoT hub.
| summarize max(TimeGenerated) by DeviceId, _ResourceId ```
+- Connection events for a specific device: All connection events logged for a specific device (*test-device*).
+
+ ```kusto
+ AzureDiagnostics
+ | where ResourceProvider == "MICROSOFT.DEVICES" and ResourceType == "IOTHUBS"
+ | where Category == "Connections"
+ | extend DeviceId = tostring(parse_json(properties_s).deviceId)
+ | where DeviceId == "test-device"
+ ```
+ - SDK version of devices: List of devices and their SDK versions for device connections or device to cloud twin operations. ```kusto
When creating an alert rule based on platform metrics, be aware that for IoT Hub
## Monitor per-device disconnects with Event Grid
-Azure Monitor provides a metric, *Connected devices*, that you can use to monitor the number of devices connected to your IoT Hub and trigger an alert when number of connected devices drops below a threshold value. While this may be sufficient for some scenarios, [Azure Event Grid](../event-grid/index.yml) provides a low-latency, per-device monitoring solution that you can use to track device connections for critical devices and infrastructure.
+Azure Monitor provides a metric, *Connected devices*, that you can use to monitor the number of devices connected to your IoT Hub and trigger an alert when number of connected devices drops below a threshold value. Azure Monitor also emits events in the [connections category](monitor-iot-hub-reference.md#connections) that you can use to monitor device connects, disconnects, and connection errors. While these may be sufficient for some scenarios, [Azure Event Grid](../event-grid/index.yml) provides a low-latency, per-device monitoring solution that you can use to track device connections for critical devices and infrastructure.
With Event Grid, you can subscribe to the IoT Hub [**DeviceConnected** and **DeviceDisconnected** events](iot-hub-event-grid.md#event-types) to trigger alerts and monitor device connection state. Event Grid provides much lower event latency than Azure Monitor, and you can monitor on a per-device basis, rather than for the total number of connected devices. These factors make Event Grid the preferred method for monitoring connections for critical devices and infrastructure. We highly recommend using Event Grid to monitor device connections in production environments.
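As an illustration of what a subscriber to these events might do, the following sketch keeps a per-device connection-state map from events delivered in the Event Grid schema; the payload field names reflect the published IoT Hub connection-state event schema, but the handler itself and how events arrive (webhook, queue, and so on) are assumptions for the example.

```python
from typing import Dict, Tuple

# Latest known state per device: deviceId -> (sequenceNumber, is_connected)
device_state: Dict[str, Tuple[str, bool]] = {}

def handle_connection_event(event: dict) -> None:
    """Update the per-device map from one DeviceConnected/DeviceDisconnected event."""
    event_type = event.get("eventType", "")
    if event_type not in ("Microsoft.Devices.DeviceConnected",
                          "Microsoft.Devices.DeviceDisconnected"):
        return

    data = event["data"]
    device_id = data["deviceId"]
    sequence = data["deviceConnectionStateEventInfo"]["sequenceNumber"]

    # Sequence numbers increase monotonically per device, so stale or
    # out-of-order deliveries can be ignored.
    last = device_state.get(device_id)
    if last is not None and sequence <= last[0]:
        return

    device_state[device_id] = (sequence, event_type.endswith("DeviceConnected"))
```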
-For more detailed information about monitoring device connections with Event Grid and Azure Monitor, see [Monitor, diagnose, and troubleshoot disconnects with Azure IoT Hub](iot-hub-troubleshoot-connectivity.md).
+For more detailed information about monitoring device connectivity with Event Grid and Azure Monitor, see [Monitor, diagnose, and troubleshoot device connectivity to Azure IoT Hub](iot-hub-troubleshoot-connectivity.md).
## Next steps - See [Monitoring Azure IoT Hub data reference](monitor-iot-hub-reference.md) for a reference of the metrics, logs, and other important values created by Azure IoT Hub. - See [Monitoring Azure resources with Azure Monitor](../azure-monitor/essentials/monitor-azure-resource.md) for details on monitoring Azure resources.+
+- See [Monitor, diagnose, and troubleshoot device connectivity to Azure IoT Hub](iot-hub-troubleshoot-connectivity.md) for details on monitoring device connectivity.
key-vault Quick Create Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/key-vault/certificates/quick-create-go.md
Follow this guide to learn how to use the [azcertificates](https://pkg.go.dev/gi
## Prerequisites - An Azure subscription - [create one for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).-- **Go installed**: Version 1.16 or [above](https://golang.org/dl/)
+- **Go installed**: Version 1.16 or [above](https://go.dev/dl/)
- [Azure CLI](/cli/azure/install-azure-cli) ## Set up your environment
az group delete --resource-group myResourceGroup
- [Secure access to a key vault](../general/security-features.md) - [Azure Key Vault developer's guide](../general/developers-guide.md) - [Key Vault security overview](../general/security-features.md)-- [Authenticate with Key Vault](../general/authentication.md)
+- [Authenticate with Key Vault](../general/authentication.md)
key-vault Quick Create Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/key-vault/keys/quick-create-go.md
Follow this guide to learn how to use the [azkeys](https://pkg.go.dev/github.com
## Prerequisites - An Azure subscription - [create one for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).-- **Go installed**: Version 1.16 or [above](https://golang.org/dl/)
+- **Go installed**: Version 1.16 or [above](https://go.dev/dl/)
- [Azure CLI](/cli/azure/install-azure-cli)
az group delete --resource-group quickstart-rg
- [Secure access to a key vault](../general/security-features.md) - [Azure Key Vault developer's guide](../general/developers-guide.md) - [Key Vault security overview](../general/security-features.md)-- [Authenticate with Key Vault](../general/authentication.md)
+- [Authenticate with Key Vault](../general/authentication.md)
key-vault Quick Create Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/key-vault/secrets/quick-create-go.md
Get started with the [azsecrets](https://pkg.go.dev/github.com/Azure/azure-sdk-f
## Prerequisites - An Azure subscription. If you don't already have a subscription, you can [create one for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).-- [Go version 1.16 or later](https://golang.org/dl/), installed.
+- [Go version 1.16 or later](https://go.dev/dl/), installed.
- [The Azure CLI](/cli/azure/install-azure-cli), installed. ## Setup
az group delete --resource-group quickstart-rg
- [Overview of Azure Key Vault](../general/overview.md) - [Azure Key Vault developers guide](../general/developers-guide.md) - [Key Vault security overview](../general/security-features.md)-- [Authenticate with Key Vault](../general/authentication.md)
+- [Authenticate with Key Vault](../general/authentication.md)
load-balancer Quickstart Load Balancer Standard Internal Powershell https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/load-balancer/quickstart-load-balancer-standard-internal-powershell.md
Title: 'Quickstart: Create an internal load balancer - Azure PowerShell' description: This quickstart shows how to create an internal load balancer using Azure PowerShell- - - Previously updated : 08/27/2020 Last updated : 03/24/2022 #Customer intent: I want to create a load balancer so that I can load balance internal traffic to VMs.
Get started with Azure Load Balancer by using Azure PowerShell to create an inte
## Prerequisites -- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F).
+- An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
+ - Azure PowerShell installed locally or Azure Cloud Shell If you choose to install and use PowerShell locally, this article requires the Azure PowerShell module version 5.4.1 or later. Run `Get-Module -ListAvailable Az` to find the installed version. If you need to upgrade, see [Install Azure PowerShell module](/powershell/azure/install-Az-ps). If you're running PowerShell locally, you also need to run `Connect-AzAccount` to create a connection with Azure.
Create a resource group with [New-AzResourceGroup](/powershell/module/az.resourc
```azurepowershell-interactive New-AzResourceGroup -Name 'CreateIntLBQS-rg' -Location 'eastus' ```--
-# [**Standard SKU**](#tab/option-1-create-load-balancer-standard)
->[!NOTE]
->Standard SKU load balancer is recommended for production workloads. For more information about skus, see **[Azure Load Balancer SKUs](skus.md)**.
+## Configure virtual network
-In this section, you create a load balancer that load balances virtual machines.
+When you create an internal load balancer, a virtual network is configured as the network for the load balancer. Before you deploy VMs and test your load balancer, create the supporting virtual network resources.
-When you create an internal load balancer, a virtual network is configured as the network for the load balancer.
+Create a virtual network for the backend virtual machines
-## Configure virtual network - Standard
+Create a network security group to define inbound connections to your virtual network
-Before you deploy VMs and test your load balancer, create the supporting virtual network resources.
+Create an Azure Bastion host to securely manage the virtual machines in the backend pool
-Create a virtual network for the backend virtual machines.
+### Create virtual network, network security group, bastion host, and NAT gateway
-Create a network security group to define inbound connections to your virtual network.
+* Create a virtual network with [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork)
-### Create virtual network, network security group, and bastion host
+* Create a network security group rule with [New-AzNetworkSecurityRuleConfig](/powershell/module/az.network/new-aznetworksecurityruleconfig)
-* Create a virtual network with [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork).
+* Create an Azure Bastion host with [New-AzBastion](/powershell/module/az.network/new-azbastion)
-* Create a network security group rule with [New-AzNetworkSecurityRuleConfig](/powershell/module/az.network/new-aznetworksecurityruleconfig).
+* Create the NAT gateway resource with [New-AzNatGateway](/powershell/module/az.network/new-aznatgateway)
-* Create an Azure Bastion host with [New-AzBastion](/powershell/module/az.network/new-azbastion).
-
-* Create a network security group with [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup).
+* Use [New-AzVirtualNetworkSubnetConfig](/powershell/module/az.network/new-azvirtualnetworksubnetconfig) to associate the NAT gateway to the subnet of the virtual network
```azurepowershell-interactive
-## Create backend subnet config ##
-$subnet = @{
- Name = 'myBackendSubnet'
- AddressPrefix = '10.1.0.0/24'
-}
-$subnetConfig = New-AzVirtualNetworkSubnetConfig @subnet
-
-## Create Azure Bastion subnet. ##
-$bastsubnet = @{
- Name = 'AzureBastionSubnet'
- AddressPrefix = '10.1.1.0/24'
-}
-$bastsubnetConfig = New-AzVirtualNetworkSubnetConfig @bastsubnet
-
-## Create the virtual network ##
-$net = @{
- Name = 'myVNet'
- ResourceGroupName = 'CreateIntLBQS-rg'
- Location = 'eastus'
- AddressPrefix = '10.1.0.0/16'
- Subnet = $subnetConfig,$bastsubnetConfig
-}
-$vnet = New-AzVirtualNetwork @net
-
-## Create public IP address for bastion host. ##
+## Create public IP address for NAT gateway ##
$ip = @{
- Name = 'myBastionIP'
- ResourceGroupName = 'CreateIntLBQS-rg'
+ Name = 'myNATgatewayIP'
+ ResourceGroupName = 'CreateIntLBQS-rg'
Location = 'eastus' Sku = 'Standard' AllocationMethod = 'Static' }
-$publicip = New-AzPublicIpAddress @ip
-
-## Create bastion host ##
-$bastion = @{
- ResourceGroupName = 'CreateIntLBQS-rg'
- Name = 'myBastion'
- PublicIpAddress = $publicip
- VirtualNetwork = $vnet
-}
-New-AzBastion @bastion -AsJob
-
-## Create rule for network security group and place in variable. ##
-$nsgrule = @{
- Name = 'myNSGRuleHTTP'
- Description = 'Allow HTTP'
- Protocol = '*'
- SourcePortRange = '*'
- DestinationPortRange = '80'
- SourceAddressPrefix = 'Internet'
- DestinationAddressPrefix = '*'
- Access = 'Allow'
- Priority = '2000'
- Direction = 'Inbound'
-}
-$rule1 = New-AzNetworkSecurityRuleConfig @nsgrule
-
-## Create network security group ##
-$nsg = @{
- Name = 'myNSG'
- ResourceGroupName = 'CreateIntLBQS-rg'
- Location = 'eastus'
- SecurityRules = $rule1
-}
-New-AzNetworkSecurityGroup @nsg
-
-```
-## Create standard load balancer
-
-This section details how you can create and configure the following components of the load balancer:
-
-* Create a front-end IP with [New-AzLoadBalancerFrontendIpConfig](/powershell/module/az.network/new-azloadbalancerfrontendipconfig) for the frontend IP pool. This IP receives the incoming traffic on the load balancer
-
-* Create a back-end address pool with [New-AzLoadBalancerBackendAddressPoolConfig](/powershell/module/az.network/new-azloadbalancerbackendaddresspoolconfig) for traffic sent from the frontend of the load balancer. This pool is where your backend virtual machines are deployed.
-
-* Create a health probe with [Add-AzLoadBalancerProbeConfig](/powershell/module/az.network/add-azloadbalancerprobeconfig) that determines the health of the backend VM instances.
-
-* Create a load balancer rule with [Add-AzLoadBalancerRuleConfig](/powershell/module/az.network/add-azloadbalancerruleconfig) that defines how traffic is distributed to the VMs.
-
-* Create a public load balancer with [New-AzLoadBalancer](/powershell/module/az.network/new-azloadbalancer).
--
-```azurepowershell-interactive
-## Place virtual network created in previous step into a variable. ##
-$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'CreateIntLBQS-rg'
-
-## Create load balancer frontend configuration and place in variable. ##
-$lbip = @{
- Name = 'myFrontEnd'
- PrivateIpAddress = '10.1.0.4'
- SubnetId = $vnet.subnets[0].Id
-}
-$feip = New-AzLoadBalancerFrontendIpConfig @lbip
-
-## Create backend address pool configuration and place in variable. ##
-$bepool = New-AzLoadBalancerBackendAddressPoolConfig -Name 'myBackEndPool'
-
-## Create the health probe and place in variable. ##
-$probe = @{
- Name = 'myHealthProbe'
- Protocol = 'tcp'
- Port = '80'
- IntervalInSeconds = '360'
- ProbeCount = '5'
-}
-$healthprobe = New-AzLoadBalancerProbeConfig @probe
+$publicIP = New-AzPublicIpAddress @ip
-## Create the load balancer rule and place in variable. ##
-$lbrule = @{
- Name = 'myHTTPRule'
- Protocol = 'tcp'
- FrontendPort = '80'
- BackendPort = '80'
- IdleTimeoutInMinutes = '15'
- FrontendIpConfiguration = $feip
- BackendAddressPool = $bePool
-}
-$rule = New-AzLoadBalancerRuleConfig @lbrule -EnableTcpReset
-
-## Create the load balancer resource. ##
-$loadbalancer = @{
- ResourceGroupName = 'CreateIntLBQS-rg'
- Name = 'myLoadBalancer'
- Location = 'eastus'
+## Create NAT gateway resource ##
+$nat = @{
+ ResourceGroupName = 'CreateIntLBQS-rg'
+ Name = 'myNATgateway'
+ IdleTimeoutInMinutes = '10'
Sku = 'Standard'
- FrontendIpConfiguration = $feip
- BackendAddressPool = $bePool
- LoadBalancingRule = $rule
- Probe = $healthprobe
-}
-New-AzLoadBalancer @loadbalancer
-
-```
-
-## Create virtual machines - Standard
-
-In this section, you'll create the three virtual machines for the backend pool of the load balancer.
-
-* Create three network interfaces with [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface).
-
-* Set an administrator username and password for the VMs with [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential).
-
-* Create the virtual machines with:
- * [New-AzVM](/powershell/module/az.compute/new-azvm)
- * [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig)
- * [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem)
- * [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage)
- * [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface)
-
-```azurepowershell-interactive
-# Set the administrator and password for the VMs. ##
-$cred = Get-Credential
-
-## Place the virtual network into a variable. ##
-$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'CreateIntLBQS-rg'
-
-## Place the load balancer into a variable. ##
-$lb = @{
- Name = 'myLoadBalancer'
- ResourceGroupName = 'CreateIntLBQS-rg'
-}
-$bepool = Get-AzLoadBalancer @lb | Get-AzLoadBalancerBackendAddressPoolConfig
-
-## Place the network security group into a variable. ##
-$nsg = Get-AzNetworkSecurityGroup -Name 'myNSG' -ResourceGroupName 'CreateIntLBQS-rg'
-
-## For loop with variable to create virtual machines for load balancer backend pool. ##
-for ($i=1; $i -le 3; $i++)
-{
-## Command to create network interface for VMs ##
-$nic = @{
- Name = "myNicVM$i"
- ResourceGroupName = 'CreateIntLBQS-rg'
Location = 'eastus'
- Subnet = $vnet.Subnets[0]
- NetworkSecurityGroup = $nsg
- LoadBalancerBackendAddressPool = $bepool
+ PublicIpAddress = $publicIP
}
-$nicVM = New-AzNetworkInterface @nic
+$natGateway = New-AzNatGateway @nat
-## Create a virtual machine configuration for VMs ##
-$vmsz = @{
- VMName = "myVM$i"
- VMSize = 'Standard_DS1_v2'
-}
-$vmos = @{
- ComputerName = "myVM$i"
- Credential = $cred
-}
-$vmimage = @{
- PublisherName = 'MicrosoftWindowsServer'
- Offer = 'WindowsServer'
- Skus = '2019-Datacenter'
- Version = 'latest'
-}
-$vmConfig = New-AzVMConfig @vmsz `
- | Set-AzVMOperatingSystem @vmos -Windows `
- | Set-AzVMSourceImage @vmimage `
- | Add-AzVMNetworkInterface -Id $nicVM.Id
-
-## Create the virtual machine for VMs ##
-$vm = @{
- ResourceGroupName = 'CreateIntLBQS-rg'
- Location = 'eastus'
- VM = $vmConfig
- Zone = "$i"
-}
-New-AzVM @vm -AsJob
-}
-
-```
-
-The deployments of the virtual machines and bastion host are submitted as PowerShell jobs. To view the status of the jobs, use [Get-Job](/powershell/module/microsoft.powershell.core/get-job):
-
-```azurepowershell-interactive
-Get-Job
-
-Id Name PSJobTypeName State HasMoreData Location Command
- - -- -- -- -
-1 Long Running O… AzureLongRunni… Completed True localhost New-AzBastion
-2 Long Running O… AzureLongRunni… Completed True localhost New-AzVM
-3 Long Running O… AzureLongRunni… Completed True localhost New-AzVM
-4 Long Running O… AzureLongRunni… Completed True localhost New-AzVM
-```
--
-# [**Basic SKU**](#tab/option-1-create-load-balancer-basic)
-
->[!NOTE]
->Standard SKU load balancer is recommended for production workloads. For more information about skus, see **[Azure Load Balancer SKUs](skus.md)**.
-
-In this section, you create a load balancer that load balances virtual machines.
-
-When you create an internal load balancer, a virtual network is configured as the network for the load balancer.
-
-## Configure virtual network - Basic
-
-Before you deploy VMs and test your load balancer, create the supporting virtual network resources.
-
-Create a virtual network for the backend virtual machines.
-
-Create a network security group to define inbound connections to your virtual network.
-
-### Create virtual network, network security group, and bastion host
-
-* Create a virtual network with [New-AzVirtualNetwork](/powershell/module/az.network/new-azvirtualnetwork).
-
-* Create a network security group rule with [New-AzNetworkSecurityRuleConfig](/powershell/module/az.network/new-aznetworksecurityruleconfig).
-
-* Create an Azure Bastion host with [New-AzBastion](/powershell/module/az.network/new-azbastion).
-
-* Create a network security group with [New-AzNetworkSecurityGroup](/powershell/module/az.network/new-aznetworksecuritygroup).
-
-```azurepowershell-interactive
## Create backend subnet config ## $subnet = @{ Name = 'myBackendSubnet' AddressPrefix = '10.1.0.0/24'
+ NatGateway = $natGateway
} $subnetConfig = New-AzVirtualNetworkSubnetConfig @subnet
$nsg = @{
New-AzNetworkSecurityGroup @nsg ```
-## Create basic load balancer
+## Create standard load balancer
This section details how you can create and configure the following components of the load balancer: * Create a front-end IP with [New-AzLoadBalancerFrontendIpConfig](/powershell/module/az.network/new-azloadbalancerfrontendipconfig) for the frontend IP pool. This IP receives the incoming traffic on the load balancer
-* Create a back-end address pool with [New-AzLoadBalancerBackendAddressPoolConfig](/powershell/module/az.network/new-azloadbalancerbackendaddresspoolconfig) for traffic sent from the frontend of the load balancer. This pool is where your backend virtual machines are deployed.
+* Create a back-end address pool with [New-AzLoadBalancerBackendAddressPoolConfig](/powershell/module/az.network/new-azloadbalancerbackendaddresspoolconfig) for traffic sent from the frontend of the load balancer
-* Create a health probe with [Add-AzLoadBalancerProbeConfig](/powershell/module/az.network/add-azloadbalancerprobeconfig) that determines the health of the backend VM instances.
+* Create a health probe with [Add-AzLoadBalancerProbeConfig](/powershell/module/az.network/add-azloadbalancerprobeconfig) that determines the health of the backend VM instances
-* Create a load balancer rule with [Add-AzLoadBalancerRuleConfig](/powershell/module/az.network/add-azloadbalancerruleconfig) that defines how traffic is distributed to the VMs.
+* Create a load balancer rule with [Add-AzLoadBalancerRuleConfig](/powershell/module/az.network/add-azloadbalancerruleconfig) that defines how traffic is distributed to the VMs
-* Create a public load balancer with [New-AzLoadBalancer](/powershell/module/az.network/new-azloadbalancer).
+* Create a public load balancer with [New-AzLoadBalancer](/powershell/module/az.network/new-azloadbalancer)
```azurepowershell-interactive ## Place virtual network created in previous step into a variable. ##
-$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'CreateIntLBQS-rg'
+$net = @{
+ Name = 'myVNet'
+ ResourceGroupName = 'CreateIntLBQS-rg'
+}
+$vnet = Get-AzVirtualNetwork @net
## Create load balancer frontend configuration and place in variable. ## $lbip = @{
$lbrule = @{
FrontendIpConfiguration = $feip BackendAddressPool = $bePool }
-$rule = New-AzLoadBalancerRuleConfig @lbrule
+$rule = New-AzLoadBalancerRuleConfig @lbrule -EnableTcpReset
## Create the load balancer resource. ## $loadbalancer = @{ ResourceGroupName = 'CreateIntLBQS-rg' Name = 'myLoadBalancer' Location = 'eastus'
- Sku = 'Basic'
+ Sku = 'Standard'
FrontendIpConfiguration = $feip BackendAddressPool = $bePool LoadBalancingRule = $rule
New-AzLoadBalancer @loadbalancer
```
-## Create virtual machines - Basic
+## Create virtual machines
-In this section, you'll create the virtual machines for the backend pool of the load balancer.
+In this section, you'll create the two virtual machines for the backend pool of the load balancer.
-* Create three network interfaces with [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface).
+* Create two network interfaces with [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface)
-* Set an administrator username and password for the VMs with [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential).
-
-* Use [New-AzAvailabilitySet](/powershell/module/az.compute/new-azvm) to create an availability set for the virtual machines.
+* Set an administrator username and password for the VMs with [Get-Credential](/powershell/module/microsoft.powershell.security/get-credential)
* Create the virtual machines with:
+
* [New-AzVM](/powershell/module/az.compute/new-azvm)
+
* [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig)
+
* [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem)
+
* [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage)
+
* [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface) ```azurepowershell-interactive # Set the administrator and password for the VMs. ## $cred = Get-Credential
-## Place the virtual network into a variable. ##
-$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'CreateIntLBQS-rg'
+## Place virtual network created in previous step into a variable. ##
+$net = @{
+ Name = 'myVNet'
+ ResourceGroupName = 'CreateIntLBQS-rg'
+}
+$vnet = Get-AzVirtualNetwork @net
## Place the load balancer into a variable. ## $lb = @{
$lb = @{
$bepool = Get-AzLoadBalancer @lb | Get-AzLoadBalancerBackendAddressPoolConfig ## Place the network security group into a variable. ##
-$nsg = Get-AzNetworkSecurityGroup -Name 'myNSG' -ResourceGroupName 'CreateIntLBQS-rg'
-
-## Create availability set for the virtual machines. ##
-$set = @{
- Name = 'myAvailabilitySet'
- ResourceGroupName = 'CreateIntLBQS-rg'
- Location = 'eastus'
- Sku = 'Aligned'
- PlatformFaultDomainCount = '2'
- PlatformUpdateDomainCount = '2'
+$sg = @{
+ Name = 'myNSG'
+ ResourceGroupName = 'CreateIntLBQS-rg'
}
-$avs = New-AzAvailabilitySet @set
+$nsg = Get-AzNetworkSecurityGroup @sg
-## For loop with variable to create virtual machines. ##
-for ($i=1; $i -le 3; $i++)
+## For loop with variable to create virtual machines for load balancer backend pool. ##
+for ($i=1; $i -le 2; $i++)
{ ## Command to create network interface for VMs ## $nic = @{
$nicVM = New-AzNetworkInterface @nic
## Create a virtual machine configuration for VMs ## $vmsz = @{ VMName = "myVM$i"
- VMSize = 'Standard_DS1_v2'
- AvailabilitySetId = $avs.Id
+ VMSize = 'Standard_DS1_v2'
} $vmos = @{ ComputerName = "myVM$i"
$vm = @{
ResourceGroupName = 'CreateIntLBQS-rg' Location = 'eastus' VM = $vmConfig
+ Zone = "$i"
} New-AzVM @vm -AsJob }- ``` The deployments of the virtual machines and bastion host are submitted as PowerShell jobs. To view the status of the jobs, use [Get-Job](/powershell/module/microsoft.powershell.core/get-job):
Id Name PSJobTypeName State HasMoreData Location
1 Long Running O… AzureLongRunni… Completed True localhost New-AzBastion 2 Long Running O… AzureLongRunni… Completed True localhost New-AzVM 3 Long Running O… AzureLongRunni… Completed True localhost New-AzVM
-4 Long Running O… AzureLongRunni… Completed True localhost New-AzVM
``` [!INCLUDE [ephemeral-ip-note.md](../../includes/ephemeral-ip-note.md)] -- ## Install IIS Use [Set-AzVMExtension](/powershell/module/az.compute/set-azvmextension) to install the Custom Script Extension.
The extension runs `PowerShell Add-WindowsFeature Web-Server` to install the IIS
```azurepowershell-interactive ## For loop with variable to install custom script extension on virtual machines. ##
-for ($i=1; $i -le 3; $i++)
+for ($i=1; $i -le 2; $i++)
{ $ext = @{ Publisher = 'Microsoft.Compute'
Set-AzVMExtension @ext -AsJob
The extensions are deployed as PowerShell jobs. To view the status of the installation jobs, use [Get-Job](/powershell/module/microsoft.powershell.core/get-job): - ```azurepowershell-interactive Get-Job
Id Name PSJobTypeName State HasMoreData Location
10 Long Running O… AzureLongRunni… Running True localhost Set-AzVMExtension ``` -
-## Test the load balancer
-
-### Create virtual machine
+## Create the test virtual machine
Create the virtual machine with: * [New-AzNetworkInterface](/powershell/module/az.network/new-aznetworkinterface)+ * [New-AzVM](/powershell/module/az.compute/new-azvm)+ * [New-AzVMConfig](/powershell/module/az.compute/new-azvmconfig)+ * [Set-AzVMOperatingSystem](/powershell/module/az.compute/set-azvmoperatingsystem)+ * [Set-AzVMSourceImage](/powershell/module/az.compute/set-azvmsourceimage)+ * [Add-AzVMNetworkInterface](/powershell/module/az.compute/add-azvmnetworkinterface) ```azurepowershell-interactive
$cred = Get-Credential

## Place the virtual network into a variable. ##
-$vnet = Get-AzVirtualNetwork -Name 'myVNet' -ResourceGroupName 'CreateIntLBQS-rg'
+$net = @{
+ Name = 'myVNet'
+ ResourceGroupName = 'CreateIntLBQS-rg'
+}
+$vnet = Get-AzVirtualNetwork @net
## Place the network security group into a variable. ##
-$nsg = Get-AzNetworkSecurityGroup -Name 'myNSG' -ResourceGroupName 'CreateIntLBQS-rg'
+$sg = @{
+ Name = 'myNSG'
+ ResourceGroupName = 'CreateIntLBQS-rg'
+}
+$nsg = Get-AzNetworkSecurityGroup @sg
## Command to create network interface for VM ##
$nic = @{
$vm = @{
    VM = $vmConfig
}
New-AzVM @vm
```
-### Test
+## Test the load balancer
1. [Sign in](https://portal.azure.com) to the Azure portal.
Remove-AzResourceGroup -Name 'CreateIntLBQS-rg'
In this quickstart:
-* You created a standard or basic internal load balancer
-* Attached virtual machines.
-* Configured the load balancer traffic rule and health probe.
-* Tested the load balancer.
+* You created an internal load balancer
+
+* Attached virtual machines
+
+* Configured the load balancer traffic rule and health probe
+
+* Tested the load balancer
To learn more about Azure Load Balancer, continue to:

> [!div class="nextstepaction"]
logic-apps Logic Apps Handle Large Messages https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/logic-apps/logic-apps-handle-large-messages.md
Your logic app can then send an initial POST or PUT message to the target endpoi
After the endpoint responds with a suggested chunk size, your logic app follows up by sending HTTP PATCH requests that contain the content chunks.
-These steps describe the detailed process Logic Apps uses for uploading
+The following steps describe the detailed process Logic Apps uses for uploading
chunked content from your logic app to an endpoint:
-1. Your logic app sends an initial HTTP POST or PUT request
-with an empty message body. The request header,
-includes this information about the content that your logic app wants to upload in chunks:
+1. Your logic app sends an initial HTTP POST or PUT request with an empty message body. The request header includes the following information about the content that your logic app wants to upload in chunks:
| Logic Apps request header field | Value | Type | Description |
|---|---|---|---|
includes this information about the content that your logic app wants to upload
| **x-ms-content-length** | <*content-length*> | Integer | The entire content size in bytes before chunking | ||||
-2. The endpoint responds with "200" success status code and this optional information:
+1. The endpoint responds with a "200" success status code and the following information:
| Endpoint response header field | Type | Required | Description |
|---|---|---|---|
- | **x-ms-chunk-size** | Integer | No | The suggested chunk size in bytes |
| **Location** | String | Yes | The URL location where to send the HTTP PATCH messages |
+ | **x-ms-chunk-size** | Integer | No | The suggested chunk size in bytes |
||||
-3. Your logic app creates and sends follow-up HTTP PATCH messages - each with this information:
+1. Your logic app creates and sends follow-up HTTP PATCH messages - each with the following information:
- * A content chunk based on **x-ms-chunk-size** or some internally calculated
- size until all the content totaling **x-ms-content-length** is sequentially uploaded
+ * A content chunk based on **x-ms-chunk-size** or some internally calculated size until all the content totaling **x-ms-content-length** is sequentially uploaded
- * These header details about the content chunk sent in each PATCH message:
+ * The following header information about the content chunk sent in each PATCH message:
| Logic Apps request header field | Value | Type | Description |
|---|---|---|---|
includes this information about the content that your logic app wants to upload
| **Content-Length** | <*content-length*> | String | The length of size in bytes of the current chunk | |||||
-4. After each PATCH request, the endpoint confirms the receipt
-for each chunk by responding with the "200" status code and the following response headers:
+1. After each PATCH request, the endpoint confirms the receipt for each chunk by responding with the "200" status code and the following response headers:
| Endpoint response header field | Type | Required | Description |
|---|---|---|---|
for each chunk by responding with the "200" status code and the following respon
| **x-ms-chunk-size** | Integer | No | The suggested chunk size in bytes | ||||
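To make the handshake concrete, here is a minimal sketch in Go of an endpoint that could accept this chunked upload. It only reflects the status codes and the `x-ms-content-length`, `x-ms-chunk-size`, and `Location` headers described in the steps above; the route paths, the fixed session URL, and the 1-MB chunk size are illustrative assumptions, not part of the documented protocol.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strconv"
	"sync"
)

var (
	mu       sync.Mutex
	received []byte // chunks assembled in order of arrival
)

func main() {
	// Steps 1-2: the initial POST/PUT arrives with an empty body and an
	// x-ms-content-length header. Reply with 200, a Location for the PATCH
	// messages, and an optional suggested chunk size.
	http.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost && r.Method != http.MethodPut {
			http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
			return
		}
		total, _ := strconv.Atoi(r.Header.Get("x-ms-content-length"))
		fmt.Printf("caller will upload %d bytes in chunks\n", total)
		w.Header().Set("Location", "http://localhost:8080/upload/session1") // assumed session URL
		w.Header().Set("x-ms-chunk-size", "1048576")                        // advisory 1-MB chunks
		w.WriteHeader(http.StatusOK)
	})

	// Steps 3-4: each PATCH carries one content chunk; confirming with 200
	// tells the caller to keep sending chunks until x-ms-content-length
	// bytes have been delivered.
	http.HandleFunc("/upload/session1", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPatch {
			http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
			return
		}
		chunk, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		mu.Lock()
		received = append(received, chunk...)
		mu.Unlock()
		w.WriteHeader(http.StatusOK)
	})

	http.ListenAndServe(":8080", nil)
}
```

A production endpoint would also need to track when the received bytes reach **x-ms-content-length** and persist the assembled content.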
-For example, this action definition shows an HTTP POST
-request for uploading chunked content to an endpoint.
-In the action's `runTimeConfiguration` property,
+For example, this action definition shows an HTTP POST request for uploading chunked content to an endpoint. In the action's `runTimeConfiguration` property,
the `contentTransfer` property sets `transferMode` to `chunked`: ```json
machine-learning How To Data Prep Synapse Spark Pool https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/machine-learning/how-to-data-prep-synapse-spark-pool.md
script_run_config = ScriptRunConfig(source_directory = './code',
run_config = run_config) ```
+For more information about `run_config.spark.configuration` and general Spark configuration, see [SparkConfiguration Class](/python/api/azureml-core/azureml.core.runconfig.sparkconfiguration) and [Apache Spark's configuration documentation](https://spark.apache.org/docs/latest/configuration.html).
+ Once your `ScriptRunConfig` object is set up, you can submit the run. ```python
managed-instance-apache-cassandra Configure Hybrid Cluster https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/managed-instance-apache-cassandra/configure-hybrid-cluster.md
This quickstart demonstrates how to use the Azure CLI commands to configure a hy
> [!NOTE] > If you want to add more datacenters, you can repeat the above steps, but you only need the seed nodes.
+ > [!IMPORTANT]
+ > If your existing Apache Cassandra cluster only has a single data center, and this is the first time a data center is being added, ensure that the `endpoint_snitch` parameter in `cassandra.yaml` is set to `GossipingPropertyFileSnitch`.
+ 1. Finally, use the following CQL query to update the replication strategy in each keyspace to include all datacenters across the cluster: ```bash
This quickstart demonstrates how to use the Azure CLI commands to configure a hy
```bash ALTER KEYSPACE "system_auth" WITH REPLICATION = {'class': 'NetworkTopologyStrategy', 'on-premise-dc': 3, 'managed-instance-dc': 3} ```
+
+ > [!IMPORTANT]
+ > If you are using hybrid cluster as a method of migrating historic data into the new Azure Managed Instance Cassandra data centers, ensure that you run `nodetool repair --full` on all the nodes in your existing cluster's data center. You should run this only after all of the above steps have been taken. This should ensure that all historical data is replicated to your new data centers in Azure Managed Instance for Apache Cassandra. If you have a very large amount of data in your existing cluster, it may be necessary to run the repairs at the keyspace or even table level - see [here](https://cassandra.apache.org/doc/latest/cassandra/operating/repair.html) for more details on running repairs in Cassandra. Prior to changing the replication settings, you should also make sure that any application code that connects to your existing Cassandra cluster is using LOCAL_QUORUM. You should leave it at this setting during the migration (it can be switched back afterwards if required).
## Troubleshooting
mysql Connect Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/mysql/connect-go.md
Last updated 5/26/2020
[!INCLUDE[applies-to-mysql-single-server](includes/applies-to-mysql-single-server.md)]
-This quickstart demonstrates how to connect to an Azure Database for MySQL from Windows, Ubuntu Linux, and Apple macOS platforms by using code written in the [Go](https://golang.org/) language. It shows how to use SQL statements to query, insert, update, and delete data in the database. This topic assumes that you are familiar with development using Go and that you are new to working with Azure Database for MySQL.
+This quickstart demonstrates how to connect to an Azure Database for MySQL from Windows, Ubuntu Linux, and Apple macOS platforms by using code written in the [Go](https://go.dev/) language. It shows how to use SQL statements to query, insert, update, and delete data in the database. This topic assumes that you are familiar with development using Go and that you are new to working with Azure Database for MySQL.
## Prerequisites This quickstart uses the resources created in either of these guides as a starting point:
This quickstart uses the resources created in either of these guides as a starti
> Ensure the IP address you're connecting from has been added to the server's firewall rules using the [Azure portal](./howto-manage-firewall-using-portal.md) or [Azure CLI](./howto-manage-firewall-using-cli.md) ## Install Go and MySQL connector
-Install [Go](https://golang.org/doc/install) and the [go-sql-driver for MySQL](https://github.com/go-sql-driver/mysql#installation) on your own computer. Depending on your platform, follow the steps in the appropriate section:
+Install [Go](https://go.dev/doc/install) and the [go-sql-driver for MySQL](https://github.com/go-sql-driver/mysql#installation) on your own computer. Depending on your platform, follow the steps in the appropriate section:
### Windows
-1. [Download](https://golang.org/dl/) and install Go for Microsoft Windows according to the [installation instructions](https://golang.org/doc/install).
+1. [Download](https://go.dev/dl/) and install Go for Microsoft Windows according to the [installation instructions](https://go.dev/doc/install).
2. Launch the command prompt from the start menu. 3. Make a folder for your project, such as `mkdir %USERPROFILE%\go\src\mysqlgo`. 4. Change directory into the project folder, such as `cd %USERPROFILE%\go\src\mysqlgo`.
Install [Go](https://golang.org/doc/install) and the [go-sql-driver for MySQL](h
``` ### Apple macOS
-1. Download and install Go according to the [installation instructions](https://golang.org/doc/install) matching your platform.
+1. Download and install Go according to the [installation instructions](https://go.dev/doc/install) matching your platform.
2. Launch the Bash shell. 3. Make a folder for your project in your home directory, such as `mkdir -p ~/go/src/mysqlgo/`. 4. Change directory into the folder, such as `cd ~/go/src/mysqlgo/`.
Get the connection information needed to connect to the Azure Database for MySQL
## Connect, create table, and insert data Use the following code to connect to the server, create a table, and load the data by using an **INSERT** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and it checks the connection by using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://golang.org/pkg/database/sql/#DB.Exec) method several times to run several DDL commands. The code also uses [Prepare()](http://go-database-sql.org/prepared.html) and Exec() to run prepared statements with different parameters to insert three rows. Each time, a custom checkError() method is used to check if an error occurred and panic to exit.
+The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and it checks the connection by using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://go.dev/pkg/database/sql/#DB.Exec) method several times to run several DDL commands. The code also uses [Prepare()](http://go-database-sql.org/prepared.html) and Exec() to run prepared statements with different parameters to insert three rows. Each time, a custom checkError() method is used to check if an error occurred and panic to exit.
Replace the `host`, `database`, `user`, and `password` constants with your own values.
func main() {
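As a compact reference for the pattern just described (open, ping, `Exec` for DDL, then a prepared `INSERT`), the following sketch can be used; the server name, credentials, and `inventory` table here are placeholders for illustration, not values taken from this article.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql" // driver registers itself under the name "mysql"
)

// checkError panics on any error, mirroring the quickstart's approach.
func checkError(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	// Placeholder values - replace with your own server and credentials.
	host := "mydemoserver.mysql.database.azure.com"
	database := "quickstartdb"
	user := "myadmin@mydemoserver"
	password := "<password>"

	// Open a connection pool and verify connectivity with Ping.
	// Append &tls=true to the DSN if your server enforces SSL connections.
	dsn := fmt.Sprintf("%s:%s@tcp(%s:3306)/%s?allowNativePasswords=true", user, password, host, database)
	db, err := sql.Open("mysql", dsn)
	checkError(err)
	defer db.Close()
	checkError(db.Ping())

	// Exec runs DDL commands directly.
	_, err = db.Exec("DROP TABLE IF EXISTS inventory;")
	checkError(err)
	_, err = db.Exec("CREATE TABLE inventory (id SERIAL PRIMARY KEY, name VARCHAR(50), quantity INTEGER);")
	checkError(err)

	// Prepare once, then Exec with different parameters for each row.
	stmt, err := db.Prepare("INSERT INTO inventory (name, quantity) VALUES (?, ?);")
	checkError(err)
	defer stmt.Close()
	for _, row := range []struct {
		name string
		qty  int
	}{{"banana", 150}, {"orange", 154}, {"apple", 100}} {
		_, err = stmt.Exec(row.name, row.qty)
		checkError(err)
	}

	fmt.Println("Inserted 3 rows of data.")
}
```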
## Read data Use the following code to connect and read the data by using a **SELECT** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Query()](https://golang.org/pkg/database/sql/#DB.Query) method to run the select command. Then it runs [Next()](https://golang.org/pkg/database/sql/#Rows.Next) to iterate through the result set and [Scan()](https://golang.org/pkg/database/sql/#Rows.Scan) to parse the column values, saving the value into variables. Each time a custom checkError() method is used to check if an error occurred and panic to exit.
+The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Query()](https://go.dev/pkg/database/sql/#DB.Query) method to run the select command. Then it runs [Next()](https://go.dev/pkg/database/sql/#Rows.Next) to iterate through the result set and [Scan()](https://go.dev/pkg/database/sql/#Rows.Scan) to parse the column values, saving the value into variables. Each time a custom checkError() method is used to check if an error occurred and panic to exit.
Replace the `host`, `database`, `user`, and `password` constants with your own values.
func main() {
## Update data Use the following code to connect and update the data using an **UPDATE** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://golang.org/pkg/database/sql/#DB.Exec) method to run the update command. Each time a custom checkError() method is used to check if an error occurred and panic to exit.
+The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://go.dev/pkg/database/sql/#DB.Exec) method to run the update command. Each time a custom checkError() method is used to check if an error occurred and panic to exit.
Replace the `host`, `database`, `user`, and `password` constants with your own values.
func main() {
## Delete data Use the following code to connect and remove data using a **DELETE** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [go sql driver for mysql](https://github.com/go-sql-driver/mysql#installation) as a driver to communicate with the Azure Database for MySQL, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://golang.org/pkg/database/sql/#DB.Exec) method to run the delete command. Each time a custom checkError() method is used to check if an error occurred and panic to exit.
+The code calls method [sql.Open()](http://go-database-sql.org/accessing.html) to connect to Azure Database for MySQL, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://go.dev/pkg/database/sql/#DB.Exec) method to run the delete command. Each time a custom checkError() method is used to check if an error occurred and panic to exit.
Replace the `host`, `database`, `user`, and `password` constants with your own values.
mysql Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/mysql/flexible-server/overview.md
Title: Overview - Azure Database for MySQL - Flexible Server description: Learn about the Azure Database for MySQL Flexible server, a relational database service in the Microsoft cloud based on the MySQL Community Edition.++ - - Previously updated : 08/10/2021 Last updated : 03/23/2022 + # Azure Database for MySQL - Flexible Server
-[[!INCLUDE[applies-to-mysql-flexible-server](../includes/applies-to-mysql-flexible-server.md)]
+<iframe src="https://aka.ms/docs/player?id=492c7a41-5f0a-4482-828b-72be1b38e691" width="640" height="370"></iframe>
Azure Database for MySQL powered by the MySQL community edition is available in two deployment modes:
You can take advantage of this offer to develop and deploy applications that use
## High availability within and across availability zones
-Azure Database for MySQL Flexible Server allows configuring high availability with automatic failover. The high availability solution is designed to ensure that committed data is never lost due to failures, and improve overall uptime for your application. When high availability is configured, flexible server automatically provisions and manages a standby replica. You are billed for the provisioned compute and storage for both the primary and secondary replica. There are two high availability-architectural models:
+Azure Database for MySQL Flexible Server allows configuring high availability with automatic failover. The high availability solution is designed to ensure that committed data is never lost due to failures, and improve overall uptime for your application. When high availability is configured, flexible server automatically provisions and manages a standby replica. You're billed for the provisioned compute and storage for both the primary and secondary replica. There are two high availability-architectural models:
- **Zone Redundant High Availability (HA):** This option is preferred for complete isolation and redundancy of infrastructure across multiple availability zones. It provides highest level of availability, but it requires you to configure application redundancy across zones. Zone redundant HA is preferred when you want to achieve highest level of availability against any infrastructure failure in the availability zone and where latency across the availability zone is acceptable. Zone redundant HA is available in [subset of Azure regions](overview.md#azure-regions) where the region supports multiple Availability Zones and Zone redundant Premium file shares are available.
Now that you've read an introduction to Azure Database for MySQL - Single-Server
- Build your first app using your preferred language: - [Python](connect-python.md)
- - [PHP](connect-php.md)
+ - [PHP](connect-php.md)
network-watcher Network Watcher Troubleshoot Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/network-watcher/network-watcher-troubleshoot-overview.md
The resource troubleshooting log files are stored in a storage account after res
![zip file][1] > [!NOTE]
-> In some cases, only a subset of the logs files is written to storage.
> 1. In some cases, only a subset of the log files is written to storage.
> 2. For newer Gateway versions, the IkeErrors.txt, Scrubbed-wfpdiag.txt and wfpdiag.txt.sum have been replaced by an IkeLogs.txt file that contains the full IKE activity (not just errors).
For instructions on downloading files from Azure storage accounts, refer to [Get started with Azure Blob storage using .NET](../storage/blobs/storage-quickstart-blobs-dotnet.md). Another tool that can be used is Storage Explorer. More information about Storage Explorer can be found here at the following link: [Storage Explorer](https://storageexplorer.com/)
The **CPUStats.txt** file contains CPU usage and memory available at the time of
Current CPU Usage : 0 % Current Memory Available : 641 MBs ```
+### IKElogs.txt
+
+The **IKElogs.txt** file contains any IKE activity that was found during monitoring.
+
+The following example shows the contents of an IKElogs.txt file.
+
+```
+Remote <IPaddress>:500: Local <IPaddress>:500: [RECEIVED][SA_AUTH] Received IKE AUTH message
+Remote <IPaddress>:500: Local <IPaddress>:500: Received Traffic Selector payload request- [Tsid 0x729 ]Number of TSIs 2: StartAddress 10.20.0.0 EndAddress 10.20.255.255 PortStart 0 PortEnd 65535 Protocol 0, StartAddress 192.168.100.0 EndAddress 192.168.100.255 PortStart 0 PortEnd 65535 Protocol 0 Number of TSRs 1:StartAddress 0.0.0.0 EndAddress 255.255.255.255 PortStart 0 PortEnd 65535 Protocol 0
+Remote <IPaddress>:500: Local <IPaddress>:500: [SEND] Proposed Traffic Selector payload will be (Final Negotiated) - [Tsid 0x729 ]Number of TSIs 2: StartAddress 10.20.0.0 EndAddress 10.20.255.255 PortStart 0 PortEnd 65535 Protocol 0, StartAddress 192.168.100.0 EndAddress 192.168.100.255 PortStart 0 PortEnd 65535 Protocol 0 Number of TSRs 1:StartAddress 0.0.0.0 EndAddress 255.255.255.255 PortStart 0 PortEnd 65535 Protocol 0
+Remote <IPaddress>:500: Local <IPaddress>:500: [RECEIVED]Received IPSec payload: Policy1:Cipher=DESIntegrity=Md5
+IkeCleanupQMNegotiation called with error 13868 and flags a
+Remote <IPaddress>:500: Local <IPaddress>:500: [SEND][NOTIFY] Sending Notify Message - Policy Mismatch
+```
+ ### IKEErrors.txt The **IKEErrors.txt** file contains any IKE errors that were found during monitoring.
orbital Geospatial Reference Architecture https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/orbital/geospatial-reference-architecture.md
Azure has many native geospatial capabilities. In this diagram and the ones that
:::image type="content" source="media/geospatial-overview.png" alt-text="Geospatial On Azure" lightbox="media/geospatial-overview.png":::
-This architecture flow assumes that the data may be coming from databases, files or streaming sources and not stored in a native GIS format. Once the data is ingested with Azure Data Factory, or via Azure IoT, Event Hubs and Stream Analytics, it could then be stored permanently in warm storage with Azure SQL, Azure SQL Managed Instance, Azure Database for PostgreSQL or Azure Data Lake. From there, the data can be transformed and processed in batch with Azure Batch or Synapse Spark Pool, of which both can be automated through the usage of an Azure Data Factory or Synapse pipeline. For real-time data, it can be further transformed or processed with Stream Analytics, Azure Maps or brought into context with Azure Digital Twins. Once the data is transformed, it can then once again be served for additional uses in Azure SQL DB or Azure Database for PostgreSQL, Synapse SQL Pool (for abstracted non-geospatial data), Cosmos DB or Azure Data Explorer. Once ready, the data can be queried directly through the data base API, but frequently a publish layer is used. The Azure Maps Data API would suffice for small datasets, otherwise a non-native service can be introduced based on OSS or COTS, for accessing the data through web services or desktop applications. Finally, the Azure Maps Web SDK hosted in Azure App Service would allow for geovisualization. Another option is to use Azure Maps in Power BI. Lastly, HoloLens and Azure Spatial Anchors can be used to view the data and place it in the real-world for virtual reality (VR) and augmented reality (AR) experiences.
+This architecture flow assumes that the data may be coming from databases, files or streaming sources and not stored in a native GIS format. Once the data is ingested with Azure Data Factory, or via Azure IoT, Event Hubs and Stream Analytics, it could then be stored permanently in warm storage with Azure SQL, Azure SQL Managed Instance, Azure Database for PostgreSQL or Azure Data Lake Storage. From there, the data can be transformed and processed in batch with Azure Batch or Synapse Spark Pool, of which both can be automated through the usage of an Azure Data Factory or Synapse pipeline. For real-time data, it can be further transformed or processed with Stream Analytics, Azure Maps or brought into context with Azure Digital Twins. Once the data is transformed, it can then once again be served for additional uses in Azure SQL DB or Azure Database for PostgreSQL, Synapse SQL Pool (for abstracted non-geospatial data), Cosmos DB or Azure Data Explorer. Once ready, the data can be queried directly through the data base API, but frequently a publish layer is used. The Azure Maps Data API would suffice for small datasets, otherwise a non-native service can be introduced based on OSS or COTS, for accessing the data through web services or desktop applications. Finally, the Azure Maps Web SDK hosted in Azure App Service would allow for geovisualization. Another option is to use Azure Maps in Power BI. Lastly, HoloLens and Azure Spatial Anchors can be used to view the data and place it in the real-world for virtual reality (VR) and augmented reality (AR) experiences.
It should be noted as well that many of these options are optional and could be supplemented with OSS to reduce cost while also maintaining scalability, or 3rd party tools to utilize their specific capabilities. The next section addresses this need.
orbital Space Partner Program Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/orbital/space-partner-program-overview.md
+
+ Title: What is the Space Partner Program?
+description: Overview of the Azure Space Partner Program
++++ Last updated : 3/21/2022+
+# Customer intent: Educate potential partners on how to engage with the Azure Space partner programs.
++
+# What is the Space Partner Program?
+
+At Microsoft, our mission is to empower every person and every organization on the planet to achieve more. When it comes to space, we're investing in building the tools that will allow every person and organization on Earth to realize the incredible potential of space.
+
+We're a partner-led and ecosystem-focused platform. Our approach to space is multi-band, multi-vendor, and multi-orbit. Our thriving ecosystem of space partners allows for the most comprehensive and innovative offerings for our joint customers and the future of space computing.
+
+Our differentiated ecosystem of partners spans space operators, manufacturers, systems integrators, data providers, analytics and AI ISVs, and more. These partners cover a breadth of capabilities like virtualization, data processing and insight, and ground infrastructure.
++
+## Why join the Space Partner Program?
+
+We believe in a better together story for Space and Spectrum partners running on Azure. By joining the program, you can gain access to various benefits such as:
+
+- Azure Engineering Training & Adoption Resources
+- Quarterly NDA roadmap reviews and newsletters
+- Participation in Space and Spectrum focused Microsoft events
+- Co-engineering for customer POCs
+- Microsoft Product Integration or add-ins
+- Joint PR & Marketing for POCs and co-investment
+- Azure Sponsorship Credits
+- Co-sell and joint GTM coordination
+- Opportunities to be showcased in Microsoft customer presentations and sales trainings
++
+## Partner Requirements
+
+To join the program, we ask partners to commit to:
+
+- Sign a non-disclosure agreement with Microsoft
+- Run solution(s) on Azure including Azure monetary commitment
+- Provide resourcing towards joint goals and engagements
+- Participate in quarterly business and GTM reviews with committed pipelines and metrics
+- Participate in joint marketing, customer stories, and events
+
+## Learn More
+
+- [Watch this video about end-to-end Space workloads on Azure](https://youtu.be/JTt4De5FRtg)
+- Read Microsoft and Partner blogs for the latest Azure Space updates:
+ - [Connecting to the ISS](https://azure.microsoft.com/blog/connecting-azure-to-the-international-space-station-with-hewlett-packard-enterprise/)
+ - [Geospatial partnerships and capabilities](https://azure.microsoft.com/blog/new-satellite-connectivity-and-geospatial-capabilities-with-azure-space/)
+ - [Space connectivity partnerships](https://news.microsoft.com/transform/azure-space-partners-bring-deep-expertise-to-new-venture/)
+ - [Airbus partnership and geospatial scenarios](https://azure.microsoft.com/blog/geospatial-imagery-unlocks-new-cloud-computing-scenarios-on-azure/)
+ - [5G core for Gov with Lockheed Martin](https://azure.microsoft.com/blog/new-azure-for-operators-solutions-and-services-built-for-the-future-of-telecommunications/)
+ - [Private network based on SATCOM with Intelsat](https://www.intelsat.com/newsroom/intelsat-collaborates-with-microsoft-to-demonstrate-private-cellular-network-using-intelsats-global-satellite-and-ground-network/)
+- [Read this public deck on Microsoft Space offerings](https://azurespace.blob.core.windows.net/docs/Azure_Space_Public_Deck.pdf)
+- Reach out to [SpacePartnerProgram@microsoft.com](mailto:SpacePartnerProgram@microsoft.com) to learn more and sign a non-disclosure agreement
+
+## Next steps
+
+- [Sign up for MS Startups for access to credits and support](https://startups.microsoft.com/)
+- [Downlink data from satellites using Azure Orbital](overview.md)
+- [Analyze space data on Azure](/azure/architecture/example-scenario/data/geospatial-data-processing-analytics-azure)
+- [Drive insights with geospatial partners on Azure – ESRI and visualize with Power BI](https://azuremarketplace.microsoft.com/en/marketplace/apps/esri.arcgis-enterprise?tab=Overview)
+- [Use the Azure Software Radio Developer VM to jump start your software radio development](https://github.com/microsoft/azure-software-radio)
+- [List your app on the Azure Marketplace](/azure/marketplace/determine-your-listing-type#free-trial)
postgresql Connect Go https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/postgresql/connect-go.md
Last updated 5/6/2019
# Quickstart: Use Go language to connect and query data in Azure Database for PostgreSQL - Single Server
-This quickstart demonstrates how to connect to an Azure Database for PostgreSQL using code written in the [Go](https://golang.org/) language (golang). It shows how to use SQL statements to query, insert, update, and delete data in the database. This article assumes you are familiar with development using Go, but that you are new to working with Azure Database for PostgreSQL.
+This quickstart demonstrates how to connect to an Azure Database for PostgreSQL using code written in the [Go](https://go.dev/) language (golang). It shows how to use SQL statements to query, insert, update, and delete data in the database. This article assumes you are familiar with development using Go, but that you are new to working with Azure Database for PostgreSQL.
## Prerequisites This quickstart uses the resources created in either of these guides as a starting point:
This quickstart uses the resources created in either of these guides as a starti
- [Create DB - Azure CLI](quickstart-create-server-database-azure-cli.md) ## Install Go and pq connector
-Install [Go](https://golang.org/doc/install) and the [Pure Go Postgres driver (pq)](https://github.com/lib/pq) on your own machine. Depending on your platform, follow the appropriate steps:
+Install [Go](https://go.dev/doc/install) and the [Pure Go Postgres driver (pq)](https://github.com/lib/pq) on your own machine. Depending on your platform, follow the appropriate steps:
### Windows
-1. [Download](https://golang.org/dl/) and install Go for Microsoft Windows according to the [installation instructions](https://golang.org/doc/install).
+1. [Download](https://go.dev/dl/) and install Go for Microsoft Windows according to the [installation instructions](https://go.dev/doc/install).
2. Launch the command prompt from the start menu. 3. Make a folder for your project, such as `mkdir %USERPROFILE%\go\src\postgresqlgo`. 4. Change directory into the project folder, such as `cd %USERPROFILE%\go\src\postgresqlgo`.
Install [Go](https://golang.org/doc/install) and the [Pure Go Postgres driver (p
``` ### Apple macOS
-1. Download and install Go according to the [installation instructions](https://golang.org/doc/install) matching your platform.
+1. Download and install Go according to the [installation instructions](https://go.dev/doc/install) matching your platform.
2. Launch the Bash shell. 3. Make a folder for your project in your home directory, such as `mkdir -p ~/go/src/postgresqlgo/`. 4. Change directory into the folder, such as `cd ~/go/src/postgresqlgo/`.
Get the connection information needed to connect to the Azure Database for Postg
## Connect and create a table Use the following code to connect and create a table using **CREATE TABLE** SQL statement, followed by **INSERT INTO** SQL statements to add rows into the table.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the PostgreSQL server, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the PostgreSQL server, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://golang.org/pkg/database/sql/#DB.Exec) method several times to run several SQL commands. Each time a custom checkError() method checks if an error occurred and panic to exit if an error does occur.
+The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://go.dev/pkg/database/sql/#DB.Exec) method several times to run several SQL commands. Each time a custom checkError() method checks if an error occurred and panic to exit if an error does occur.
Replace the `HOST`, `DATABASE`, `USER`, and `PASSWORD` parameters with your own values.
func main() {
## Read data Use the following code to connect and read the data using a **SELECT** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the PostgreSQL server, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the PostgreSQL server, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The select query is run by calling method [db.Query()](https://golang.org/pkg/database/sql/#DB.Query), and the resulting rows are kept in a variable of type
-[rows](https://golang.org/pkg/database/sql/#Rows). The code reads the column data values in the current row using method [rows.Scan()](https://golang.org/pkg/database/sql/#Rows.Scan) and loops over the rows using the iterator [rows.Next()](https://golang.org/pkg/database/sql/#Rows.Next) until no more rows exist. Each row's column values are printed to the console out. Each time a custom checkError() method is used to check if an error occurred and panic to exit if an error does occur.
+The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The select query is run by calling method [db.Query()](https://go.dev/pkg/database/sql/#DB.Query), and the resulting rows are kept in a variable of type
+[rows](https://go.dev/pkg/database/sql/#Rows). The code reads the column data values in the current row using method [rows.Scan()](https://go.dev/pkg/database/sql/#Rows.Scan) and loops over the rows using the iterator [rows.Next()](https://go.dev/pkg/database/sql/#Rows.Next) until no more rows exist. Each row's column values are printed to the console out. Each time a custom checkError() method is used to check if an error occurred and panic to exit if an error does occur.
Replace the `HOST`, `DATABASE`, `USER`, and `PASSWORD` parameters with your own values.
func main() {
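For reference, a condensed sketch of that read pattern follows; the connection values are placeholders, and the `inventory` table is assumed to have been created in the earlier section.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // pq registers itself as the "postgres" driver
)

func checkError(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	// Placeholder values - replace with your own server and credentials.
	const (
		host     = "mydemoserver.postgres.database.azure.com"
		database = "mypgsqldb"
		user     = "mylogin@mydemoserver"
		password = "<password>"
	)

	// lib/pq accepts a key=value connection string; sslmode=require is used
	// because Azure Database for PostgreSQL enforces SSL by default.
	connString := fmt.Sprintf("host=%s port=5432 user=%s password=%s dbname=%s sslmode=require",
		host, user, password, database)
	db, err := sql.Open("postgres", connString)
	checkError(err)
	defer db.Close()
	checkError(db.Ping())

	// Query returns the result set; Next advances through it and Scan copies
	// the current row's columns into Go variables.
	rows, err := db.Query("SELECT id, name, quantity FROM inventory;")
	checkError(err)
	defer rows.Close()

	for rows.Next() {
		var id, quantity int
		var name string
		checkError(rows.Scan(&id, &name, &quantity))
		fmt.Printf("Data row = (%d, %s, %d)\n", id, name, quantity)
	}
	checkError(rows.Err())
}
```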
## Update data Use the following code to connect and update the data using an **UPDATE** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the Postgres server, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the Postgres server, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://golang.org/pkg/database/sql/#DB.Exec) method to run the SQL statement that updates the table. A custom checkError() method is used to check if an error occurred and panic to exit if an error does occur.
+The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://go.dev/pkg/database/sql/#DB.Exec) method to run the SQL statement that updates the table. A custom checkError() method is used to check if an error occurred and panic to exit if an error does occur.
Replace the `HOST`, `DATABASE`, `USER`, and `PASSWORD` parameters with your own values. ```go
func main() {
## Delete data Use the following code to connect and delete the data using a **DELETE** SQL statement.
-The code imports three packages: the [sql package](https://golang.org/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the Postgres server, and the [fmt package](https://golang.org/pkg/fmt/) for printed input and output on the command line.
+The code imports three packages: the [sql package](https://go.dev/pkg/database/sql/), the [pq package](https://godoc.org/github.com/lib/pq) as a driver to communicate with the Postgres server, and the [fmt package](https://go.dev/pkg/fmt/) for printed input and output on the command line.
-The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://golang.org/pkg/database/sql/#DB.Ping). A [database handle](https://golang.org/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://golang.org/pkg/database/sql/#DB.Exec) method to run the SQL statement that deletes a row from the table. A custom checkError() method is used to check if an error occurred and panic to exit if an error does occur.
+The code calls method [sql.Open()](https://godoc.org/github.com/lib/pq#Open) to connect to Azure Database for PostgreSQL database, and checks the connection using method [db.Ping()](https://go.dev/pkg/database/sql/#DB.Ping). A [database handle](https://go.dev/pkg/database/sql/#DB) is used throughout, holding the connection pool for the database server. The code calls the [Exec()](https://go.dev/pkg/database/sql/#DB.Exec) method to run the SQL statement that deletes a row from the table. A custom checkError() method is used to check if an error occurred and panic to exit if an error does occur.
Replace the `HOST`, `DATABASE`, `USER`, and `PASSWORD` parameters with your own values. ```go
private-link Create Private Link Service Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/private-link/create-private-link-service-portal.md
In this section, you create a virtual network and subnet to host the load balanc
### Create NAT gateway
-In this section, you'll create a NAT gateway and assign it to the subnet in the virtual network you created previously.
+In this section, you'll create a NAT gateway and assign it to the subnet in the virtual network you created previously. The NAT gateway is used by the resources in the load balancer virtual network for outbound internet access. If the virtual machines in the backend pool of the load balancer don't require outbound internet access, you can proceed to the next section.
1. On the upper-left side of the screen, select **Create a resource > Networking > NAT gateway** or search for **NAT gateway** in the search box.
private-link Private Endpoint Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/private-link/private-endpoint-overview.md
A private-link resource is the destination target of a specified private endpoin
| Azure Synapse Analytics | Microsoft.Synapse/workspaces | SQL, SqlOnDemand, Dev | | Azure App Service | Microsoft.Web/hostingEnvironments | hosting environment | | Azure App Service | Microsoft.Web/sites | sites |
-| Azure App Service | Microsoft.Web/staticSites | staticSite |
+| Azure Static Web Apps | Microsoft.Web/staticSites | staticSite |
> [!NOTE] > You can create private endpoints only on a General Purpose v2 (GPv2) storage account.
public-multi-access-edge-compute-mec Tutorial Create Vm Using Go Sdk https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/public-multi-access-edge-compute-mec/tutorial-create-vm-using-go-sdk.md
In this tutorial, you learn how to:
- Add an allowlisted subscription to your Azure account, which allows you to deploy resources in Azure public MEC. If you don't have an active allowed subscription, contact the [Azure public MEC product team](https://aka.ms/azurepublicmec). -- [Install Go](https://golang.org/doc/install)
+- [Install Go](https://go.dev/doc/install)
- [Install the Azure SDK for Go](/azure/developer/go/azure-sdk-install)
purview How To Enable Data Use Governance https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/purview/how-to-enable-data-use-governance.md
Previously updated : 3/07/2022 Last updated : 3/24/2022
To disable data use governance for a source, resource group, or subscription, a
1. Set the **Data use governance** toggle to **Disabled**.
+## Delegation of access control responsibility to Azure Purview
+Note:
+1. Once a resource has been enabled for *Data use Governance*, **any** Azure Purview *policy author* will be able to create access policies against it, and **any** Azure Purview *Data source admin* will be able to publish those policies at **any point afterwards**.
+1. **Any** Azure Purview *root collection admin* can create **new** *Data Source Admin* and *Policy author* roles.
-### Important considerations related to Data use governance
+## Additional considerations related to Data use governance
- Make sure you write down the **Name** you use when registering in Azure Purview. You will need it when you publish a policy. The recommended practice is to make the registered name exactly the same as the endpoint name.
- To disable a source for *Data use governance*, remove it first from being bound (i.e. published) in any policy.
- While a user needs to have both data source *Owner* and Azure Purview *Data source admin* to enable a source for *Data use governance*, either of those roles can independently disable it.
To disable data use governance for a source, resource group, or subscription, a
> - Moving data sources to a different resource group or subscription is not yet supported. If want to do that, de-register the data source in Azure Purview before moving it and then register it again after that happens. > - Once a subscription gets disabled for *Data use governance* any underlying assets that are enabled for *Data use governance* will be disabled, which is the right behavior. However, policy statements based on those assets will still be allowed after that.
-### Data use governance best practices
+## Data use governance best practices
- We highly encourage registering data sources for *Data use governance* and managing all associated access policies in a single Azure Purview account.
- Should you have multiple Azure Purview accounts, be aware that **all** data sources belonging to a subscription must be registered for *Data use governance* in a single Azure Purview account. That Azure Purview account can be in any subscription in the tenant. The *Data use governance* toggle will become greyed out when there are invalid configurations. Some examples of valid and invalid configurations follow in the diagram below:
- **Case 1** shows a valid configuration where a Storage account is registered in an Azure Purview account in the same subscription.
To disable data use governance for a source, resource group, or subscription, a
![Diagram shows valid and invalid configurations when using multiple Azure Purview accounts to manage policies.](./media/access-policies-common/valid-and-invalid-configurations.png)

## Next steps

- [Create data owner policies for your resources](how-to-data-owner-policy-authoring-generic.md)
purview How To Link Azure Data Share https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/purview/how-to-link-azure-data-share.md
Previously updated : 11/25/2020 Last updated : 03/14/2022 # How to connect Azure Data Share and Azure Purview
purview Tutorial Azure Purview Checklist https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/purview/tutorial-azure-purview-checklist.md
This article lists prerequisites that help you get started quickly on Azure Purv
|1 | Azure Active Directory Tenant |N/A |An [Azure Active Directory tenant](../active-directory/fundamentals/active-directory-access-create-new-tenant.md) should be associated with your subscription. <ul><li>*Global Administrator* or *Information Protection Administrator* role is required, if you plan to [extend Microsoft 365 Sensitivity Labels to Azure Purview for files and db columns](create-sensitivity-label.md)</li><li> *Global Administrator* or *Power BI Administrator* role is required, if you're planning to [scan Power BI tenants](register-scan-power-bi-tenant.md).</li></ul> | |2 |An active Azure Subscription |*Subscription Owner* |An Azure subscription is needed to deploy Azure Purview and its managed resources. If you don't have an Azure subscription, create a [free subscription](https://azure.microsoft.com/free/) before you begin. | |3 |Define whether you plan to deploy an Azure Purview with managed Event Hub | N/A |A managed Event Hub is created as part of Azure Purview account creation, see Azure Purview account creation. You can publish messages to the Event Hub kafka topic ATLAS_HOOK and Azure Purview will consume and process it. Azure Purview will notify entity changes to Event Hub kafka topic ATLAS_ENTITIES and user can consume and process it. |
-|4 |Register the following resource providers: <ul><li>Microsoft.Storage</li><li>Microsoft.EventHub (optional)</li><li>Microsoft.Purview</li></ul> |*Subscription Owner* or custom role to register Azure resource providers (_/register/action_) | [Register required Azure Resource Providers](/azure-resource-manager/management/resource-providers-and-types.md) in the Azure Subscription that is designated for Azure Purview Account. Review [Azure resource provider operations](../role-based-access-control/resource-provider-operations.md). |
+|4 |Register the following resource providers: <ul><li>Microsoft.Storage</li><li>Microsoft.EventHub (optional)</li><li>Microsoft.Purview</li></ul> |*Subscription Owner* or custom role to register Azure resource providers (_/register/action_) | [Register required Azure Resource Providers](/azure/azure-resource-manager/management/resource-providers-and-types) in the Azure Subscription that is designated for Azure Purview Account. Review [Azure resource provider operations](../role-based-access-control/resource-provider-operations.md). |
|5 |Update Azure Policy to allow deployment of the following resources in your Azure subscription: <ul><li>Azure Purview</li><li>Azure Storage</li><li>Azure Event Hub (optional)</li></ul> |*Subscription Owner* |Use this step if an existing Azure Policy prevents deploying such Azure resources. If a blocking policy exists and needs to remain in place, please follow our [Azure Purview exception tag guide](create-azure-purview-portal-faq.md) and follow the steps to create an exception for Azure Purview accounts. | |6 | Define your network security requirements. | Network and Security architects. |<ul><li> Review [Azure Purview network architecture and best practices](concept-best-practices-network.md) to define what scenario is more relevant to your network requirements. </li><li>If private network is needed, use [Azure Purview Managed IR](catalog-managed-vnet.md) to scan Azure data sources when possible to reduce complexity and administrative overhead. </li></ul> | |7 |An Azure Virtual Network and Subnet(s) for Azure Purview private endpoints. | *Network Contributor* to create or update Azure VNet. |Use this step if you're planning to deploy [private endpoint connectivity with Azure Purview](catalog-private-link.md): <ul><li>Private endpoints for **Ingestion**.</li><li>Private endpoint for Azure Purview **Account**.</li><li>Private endpoint for Azure Purview **Portal**.</li></ul> <br> Deploy [Azure Virtual Network](../virtual-network/quick-create-portal.md) if you need one. |
This article lists prerequisites that help you get started quickly on Azure Purv
|31 |Connect Azure Data Factory to Azure Purview from Azure Data Factory Portal. **Manage** -> **Azure Purview**. Select **Connect to a Purview account**. <br> Validate if Azure resource tag **catalogUri** exists in ADF Azure resource. |Azure Data Factory Contributor / Data curator |Use this step if you have **Azure Data Factory**. | |32 |Verify if you have at least one **Microsoft 365 required license** in your Azure Active Directory tenant to use sensitivity labels in Azure Purview. |Azure Active Directory *Global Reader* |Perform this step if you're planning in extending **Sensitivity Labels from Microsoft 365 to Azure Purview** <br> For more information, see [licensing requirements to use sensitivity labels on files and database columns in Azure Purview](sensitivity-labels-frequently-asked-questions.yml) | |33 |Consent "**Extend labeling to assets in Azure Purview**" |Compliance Administrator <br> Azure Information Protection Administrator |Use this step if you are interested in extending Sensitivity Labels from Microsoft 365 to Azure Purview. <br> Use this step if you are interested in extending **Sensitivity Labels** from Microsoft 365 to Azure Purview. |
-|34 |Create new collections and assign roles in Azure Purview |*Collection admin* | [Create a collection and assign permissions in Azure Purview](/quickstart-create-collection.md). |
+|34 |Create new collections and assign roles in Azure Purview |*Collection admin* | [Create a collection and assign permissions in Azure Purview](./quickstart-create-collection.md). |
|36 |Register and scan Data Sources in Azure Purview |*Data Source admin* <br> *Data Reader* or *Data Curator* | For more information, see [supported data sources and file types](azure-purview-connector-overview.md) | |35 |Grant access to data roles in the organization |*Collection admin* |Provide access to other teams to use Azure Purview: <ul><li> Data curator</li><li>Data reader</li><li>Collection admin</li><li>Data source admin</li><li>Policy Author</li><li>Workflow admin</li></ul> <br> For more information, see [Access control in Azure Purview](catalog-permissions.md). |
route-server Route Server Faq https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/route-server/route-server-faq.md
Previously updated : 01/26/2022 Last updated : 03/25/2022
No, Azure Route Server supports only 16-bit (2 bytes) ASNs.
### Can I associate a User Defined Route (UDR) to the RouteServerSubnet?
-No, Azure Route Server doesn't support configuring a UDR on the RouteServerSubnet.
+No, Azure Route Server doesn't support configuring a UDR on the RouteServerSubnet. Note that Azure Route Server doesn't route any data traffic between NVAs and VMs.
### Can I associate a Network Security group (NSG) to the RouteServerSubnet?
No. System routes for traffic related to virtual network, virtual network peerin
You can still use Route Server to direct traffic between subnets in different virtual networks through the NVA. The only design that might work is one subnet per "spoke" virtual network, with all virtual networks peered to a "hub" virtual network, but this is very limiting and needs to take into account scaling considerations and Azure's maximum limits on virtual networks versus subnets.
+### Can Azure Route Server filter out routes from NVAs?
+
+Azure Route Server supports the ***NO_ADVERTISE*** BGP community. If an NVA advertises routes with this community string to the route server, the route server won't advertise them to other peers, including the ExpressRoute gateway. This feature can help reduce the number of routes sent from Azure Route Server to ExpressRoute.
+ ## <a name = "limitations"></a>Route Server Limits Azure Route Server has the following limits (per deployment).
search Cognitive Search Custom Skill Interface https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/search/cognitive-search-custom-skill-interface.md
Title: Interface definition for custom skills
+ Title: Custom skill interface
-description: Custom data extraction interface for web-api custom skill in an AI enrichment pipeline in Azure Cognitive Search.
+description: Integrate a custom skill with an AI enrichment pipeline in Azure Cognitive Search through a web interface that defines compatible inputs and outputs in a skillset.
--++ - Previously updated : 01/14/2022+ Last updated : 03/25/2022
-# How to add a custom skill to an Azure Cognitive Search enrichment pipeline
+# Add a custom skill to an Azure Cognitive Search enrichment pipeline
An [enrichment pipeline](cognitive-search-concept-intro.md) can include both [built-in skills](cognitive-search-predefined-skills.md) and [custom skills](cognitive-search-custom-skill-web-api.md) that you personally create and publish. Your custom code executes externally to the search service (for example, as an Azure function), but accepts inputs and sends outputs to the skillset just like any other skill.
Building a custom skill gives you a way to insert transformations unique to your
## Set the endpoint and timeout interval
-The interface for a custom skill is specified through the [Custom WebAPI skill](cognitive-search-custom-skill-web-api.md).
-
-By default, the connection to the endpoint will time out if a response is not returned within a 30-second window. The indexing pipeline is synchronous and indexing will produce a timeout error if a response is not received in that time frame. You can increase the interval to a maximum value of 230 seconds by setting the timeout parameter:
+The interface for a custom skill is specified through the [Custom Web API skill](cognitive-search-custom-skill-web-api.md).
```json
- "@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
- "description": "This skill has a 230 second timeout",
- "uri": "https://[your custom skill uri goes here]",
- "timeout": "PT230S",
+"@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
+"description": "This skill has a 230 second timeout",
+"uri": "https://[your custom skill uri goes here]",
+"authResourceId": "[for managed identity connections, your app's client ID goes here]",
+"timeout": "PT230S",
```
-When setting the URI, make sure the URI is secure (HTTPS).
+The URI is the HTTPS endpoint of your function or app. When setting the URI, make sure the URI is secure (HTTPS). If your code is hosted in an Azure function app, the URI should include an [API key in the header or as a URI parameter](../azure-functions/functions-bindings-http-webhook-trigger.md#api-key-authorization) to authorize the request.
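For example, here's a minimal sketch of how a function key might be supplied as a request header in the skill definition; the host name, route, and key value are illustrative placeholders, not values from this article:

```json
"@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
"uri": "https://contoso-skills.azurewebsites.net/api/my-skill",
"httpHeaders": {
    "x-functions-key": "<your-function-host-key>"
},
```

Passing the key as a `code` query string parameter on the URI is an equivalent alternative supported by Azure Functions.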
+
+If instead your function or app uses Azure managed identities and Azure roles for authentication and authorization, the custom skill can include an authentication token on the request. The following points describe the requirements for this approach:
+++ The search service, which sends the request on the indexer's behalf, must be [configured to use a managed identity](search-howto-managed-identities-data-sources.md) (either system or user-assigned) so that the caller can be authenticated by Azure Active Directory.+++ Your function or app must be [configured for Azure Active Directory](../app-service/configure-authentication-provider-aad.md).+++ Your [custom skill definition](cognitive-search-custom-skill-web-api.md) must include an "authResourceId" property. This property takes an application (client) ID, in a [supported format](../active-directory/develop/security-best-practices-for-app-registration.md#appid-uri-configuration): `api://<appId>`.+
+By default, the connection to the endpoint will time out if a response is not returned within a 30-second window. The indexing pipeline is synchronous and indexing will produce a timeout error if a response is not received in that time frame. You can increase the interval to a maximum value of 230 seconds by setting the timeout parameter:
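For instance, a sketch of the relevant properties with the timeout raised to the maximum (the URI placeholder is illustrative):

```json
"@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
"uri": "https://[your custom skill uri goes here]",
"timeout": "PT230S",
```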
## Format Web API inputs
When you create a Web API enricher, you can describe HTTP headers and parameters
"skills": [ { "@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
+ "name": "myCustomSkill"
"description": "This skill calls an Azure function, which in turn calls TA sentiment", "uri": "https://indexer-e2e-webskill.azurewebsites.net/api/DateExtractor?language=en", "context": "/document",
search Cognitive Search Custom Skill Web Api https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/search/cognitive-search-custom-skill-web-api.md
Title: Custom Web API skill in skillsets
description: Extend capabilities of Azure Cognitive Search skillsets by calling out to Web APIs. Use the Custom Web API skill to integrate your custom code. --++ Previously updated : 06/17/2020 Last updated : 03/25/2022 # Custom Web API skill in an Azure Cognitive Search enrichment pipeline The **Custom Web API** skill allows you to extend AI enrichment by calling out to a Web API endpoint providing custom operations. Similar to built-in skills, a **Custom Web API** skill has inputs and outputs. Depending on the inputs, your Web API receives a JSON payload when the indexer runs, and outputs a JSON payload as a response, along with a success status code. The response is expected to have the outputs specified by your custom skill. Any other response is considered an error and no enrichments are performed.
-The structure of the JSON payloads are described further down in this document.
+The structure of the JSON payload is described further down in this document.
> [!NOTE] > The indexer will retry twice for certain standard HTTP status codes returned from the Web API. These HTTP status codes are:
The structure of the JSON payloads are described further down in this document.
> * `503 Service Unavailable` > * `429 Too Many Requests`
-## @odata.type
+## @odata.type
+ Microsoft.Skills.Custom.WebApiSkill ## Skill parameters
Parameters are case-sensitive.
| Parameter name | Description | |--|-|
-| `uri` | The URI of the Web API to which the _JSON_ payload will be sent. Only **https** URI scheme is allowed |
+| `uri` | The URI of the Web API to which the JSON payload will be sent. Only **https** URI scheme is allowed |
+| `authResourceId` | (Optional) A string that if set, indicates that this skill should use a managed identity on the connection to the function or app hosting the code. The value of this property is the application (client) ID of the function or app's registration in Azure Active Directory. This value will be used to scope the authentication token retrieved by the indexer, and will be sent along with the custom Web skill API request to the function or app. Setting this property requires that your search service is [configured for managed identity](search-howto-managed-identities-data-sources.md) and your Azure function app is [configured for an Azure AD login](../app-service/configure-authentication-provider-aad.md). |
| `httpMethod` | The method to use while sending the payload. Allowed methods are `PUT` or `POST` |
-| `httpHeaders` | A collection of key-value pairs where the keys represent header names and values represent header values that will be sent to your Web API along with the payload. The following headers are prohibited from being in this collection: `Accept`, `Accept-Charset`, `Accept-Encoding`, `Content-Length`, `Content-Type`, `Cookie`, `Host`, `TE`, `Upgrade`, `Via` |
+| `httpHeaders` | A collection of key-value pairs where the keys represent header names and values represent header values that will be sent to your Web API along with the payload. The following headers are prohibited from being in this collection: `Accept`, `Accept-Charset`, `Accept-Encoding`, `Content-Length`, `Content-Type`, `Cookie`, `Host`, `TE`, `Upgrade`, `Via`. |
| `timeout` | (Optional) When specified, indicates the timeout for the http client making the API call. It must be formatted as an XSD "dayTimeDuration" value (a restricted subset of an [ISO 8601 duration](https://www.w3.org/TR/xmlschema11-2/#dayTimeDuration) value). For example, `PT60S` for 60 seconds. If not set, a default value of 30 seconds is chosen. The timeout can be set to a maximum of 230 seconds and a minimum of 1 second. |
-| `batchSize` | (Optional) Indicates how many "data records" (see _JSON_ payload structure below) will be sent per API call. If not set, a default of 1000 is chosen. We recommend that you make use of this parameter to achieve a suitable tradeoff between indexing throughput and load on your API |
+| `batchSize` | (Optional) Indicates how many "data records" (see JSON payload structure below) will be sent per API call. If not set, a default of 1000 is chosen. We recommend that you make use of this parameter to achieve a suitable tradeoff between indexing throughput and load on your API. |
| `degreeOfParallelism` | (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The `degreeOfParallelism` can be set to a maximum of 10 and a minimum of 1. | ## Skill inputs
-There are no "predefined" inputs for this skill. You can choose one or more fields that would be already available at the time of this skill's execution as inputs and the _JSON_ payload sent to the Web API will have different fields.
+There are no predefined inputs for this skill. You can choose one or more fields that would be already available at the time of this skill's execution as inputs and the JSON payload sent to the Web API will have different fields.
## Skill outputs
-There are no "predefined" outputs for this skill. Depending on the response your Web API will return, add output fields so that they can be picked up from the _JSON_ response.
-
+There are no predefined outputs for this skill. Depending on the response your Web API will return, add output fields so that they can be picked up from the JSON response.
## Sample definition ```json
- {
- "@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
- "description": "A custom skill that can identify positions of different phrases in the source text",
- "uri": "https://contoso.count-things.com",
- "batchSize": 4,
- "context": "/document",
- "inputs": [
- {
- "name": "text",
- "source": "/document/content"
- },
- {
- "name": "language",
- "source": "/document/languageCode"
- },
- {
- "name": "phraseList",
- "source": "/document/keyphrases"
- }
- ],
- "outputs": [
- {
- "name": "hitPositions"
- }
- ]
- }
+{
+ "@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
+ "description": "A custom skill that can identify positions of different phrases in the source text",
+ "uri": "https://contoso.count-things.com",
+ "batchSize": 4,
+ "context": "/document",
+ "inputs": [
+ {
+ "name": "text",
+ "source": "/document/content"
+ },
+ {
+ "name": "language",
+ "source": "/document/languageCode"
+ },
+ {
+ "name": "phraseList",
+ "source": "/document/keyphrases"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "hitPositions"
+ }
+ ]
+}
```+ ## Sample input JSON structure
-This _JSON_ structure represents the payload that will be sent to your Web API.
+This JSON structure represents the payload that will be sent to your Web API.
It will always follow these constraints:
-* The top-level entity is called `values` and will be an array of objects. The number of such objects will be at most the `batchSize`
-* Each object in the `values` array will have
+* The top-level entity is called `values` and will be an array of objects. The number of such objects will be at most the `batchSize`.
+* Each object in the `values` array will have:
* A `recordId` property that is a **unique** string, used to identify that record.
- * A `data` property that is a _JSON_ object. The fields of the `data` property will correspond to the "names" specified in the `inputs` section of the skill definition. The value of those fields will be from the `source` of those fields (which could be from a field in the document, or potentially from another skill)
+ * A `data` property that is a JSON object. The fields of the `data` property will correspond to the "names" specified in the `inputs` section of the skill definition. The value of those fields will be from the `source` of those fields (which could be from a field in the document, or potentially from another skill).
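For instance, here's a minimal sketch of an input payload that matches the sample definition shown earlier; the record values are illustrative:

```json
{
  "values": [
    {
      "recordId": "0",
      "data": {
        "text": "Este es un contrato en Inglés",
        "language": "es",
        "phraseList": [ "contrato" ]
      }
    },
    {
      "recordId": "1",
      "data": {
        "text": "This is a contract in English",
        "language": "en",
        "phraseList": [ "contract" ]
      }
    }
  ]
}
```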
```json {
It will always follow these constraints:
## Sample output JSON structure
-The "output" corresponds to the response returned from your Web API. The Web API should only return a _JSON_ payload (verified by looking at the `Content-Type` response header) and should satisfy the following constraints:
+The "output" corresponds to the response returned from your Web API. The Web API should only return a JSON payload (verified by looking at the `Content-Type` response header) and should satisfy the following constraints:
* There should be a top-level entity called `values` which should be an array of objects. * The number of objects in the array should be the same as the number of objects sent to the Web API.
The "output" corresponds to the response returned from your Web API. The Web API
* A `data` property, which is an object where the fields are enrichments matching the "names" in the `output` and whose value is considered the enrichment. * An `errors` property, an array listing any errors encountered that will be added to the indexer execution history. This property is required, but can have a `null` value. * A `warnings` property, an array listing any warnings encountered that will be added to the indexer execution history. This property is required, but can have a `null` value.
-* The objects in the `values` array need not be in the same order as the objects in the `values` array sent as a request to the Web API. However, the `recordId` is used for correlation so any record in the response containing a `recordId` which was not part of the original request to the Web API will be discarded.
+* The ordering of objects in the `values` in either the request or response isn't important. However, the `recordId` is used for correlation so any record in the response containing a `recordId` which was not part of the original request to the Web API will be discarded.
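For instance, here's a minimal sketch of a response that matches the sample definition shown earlier; the enrichment and error values are illustrative:

```json
{
  "values": [
    {
      "recordId": "0",
      "data": {
        "hitPositions": [ 12, 35 ]
      },
      "errors": null,
      "warnings": null
    },
    {
      "recordId": "1",
      "data": null,
      "errors": [
        { "message": "Could not process this record" }
      ],
      "warnings": null
    }
  ]
}
```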
```json {
The "output" corresponds to the response returned from your Web API. The Web API
``` ## Error cases+ In addition to your Web API being unavailable or returning non-successful status codes, the following are considered erroneous cases: * If the Web API returns a success status code but the response indicates that it is not `application/json`, then the response is considered invalid and no enrichments will be performed.
-* If there are **invalid** (with `recordId` not in the original request, or with duplicate values) records in the response `values` array, no enrichment will be performed for **those** records.
-For cases when the Web API is unavailable or returns a HTTP error, a friendly error with any available details about the HTTP error will be added to the indexer execution history.
+* If there are invalid records (for example, `recordId` is missing or duplicated) in the response `values` array, no enrichment will be performed for the invalid records.
+
+For cases when the Web API is unavailable or returns an HTTP error, a friendly error with any available details about the HTTP error will be added to the indexer execution history.
## See also
search Search Get Started Bicep https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/search/search-get-started-bicep.md
The Azure resource defined in this Bicep file:
```azurecli az group create --name exampleRG --location eastus
- az deployment group create --resource-group exampleRG --template-file main.bicep
+ az deployment group create --resource-group exampleRG --template-file main.bicep --parameters serviceName=<service-name>
``` # [PowerShell](#tab/PowerShell) ```azurepowershell New-AzResourceGroup -Name exampleRG -Location eastus
- New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep
+ New-AzResourceGroupDeployment -ResourceGroupName exampleRG -TemplateFile ./main.bicep -serviceName "<service-name>"
``` > [!NOTE]
- > You'll be prompted to enter a service name. The service name must only contain lowercase letters, digits, or dashes. You can't use a dash as the first two characters or the last character. The name has a minimum length of 2 characters and a maximum length of 60 characters.
+ > Replace **\<service-name\>** with the name of the Search service. The service name must only contain lowercase letters, digits, or dashes. You can't use a dash as the first two characters or the last character. The name has a minimum length of 2 characters and a maximum length of 60 characters.
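If you prefer not to pass the value inline, you can supply it in a parameters file instead (for example, with `--parameters @main.parameters.json` in the Azure CLI). The file name and service name below are illustrative:

```json
{
  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "serviceName": {
      "value": "my-search-service"
    }
  }
}
```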
When the deployment finishes, you should see a message indicating the deployment succeeded.
search Search Howto Managed Identities Data Sources https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/search/search-howto-managed-identities-data-sources.md
Previously updated : 02/11/2022 Last updated : 03/22/2022 # Connect a search service to other Azure resources using a managed identity
-You can configure an Azure Cognitive Search connection to other Azure resources using a [system-assigned or user-assigned managed identity](../active-directory/managed-identities-azure-resources/overview.md) and an Azure role assignment on the remote service. Managed identities and role assignments eliminate the need for passing secrets and credentials in a connection strings or code.
+You can configure an Azure Cognitive Search service to connect to other Azure resources using a [system-assigned or user-assigned managed identity](../active-directory/managed-identities-azure-resources/overview.md) and an Azure role assignment. Managed identities and role assignments eliminate the need for passing secrets and credentials in a connection string or code.
## Prerequisites + A search service at the [Basic tier or above](search-sku-tier.md).
-+ An Azure resource that accepts incoming requests from a managed identity having a valid role assignment.
++ An Azure resource that accepts incoming requests from an Azure AD login that has a valid role assignment. ## Supported scenarios
-Cognitive Search supports system-assigned managed identity in all scenarios, and user-assigned managed identities in the indexer data access scenario. A user-assigned managed identity is specified through an "identity" property. Currently, only an indexer data source definition has the "identity" property.
+Cognitive Search can use a system-assigned or user-assigned managed identity on outbound connections to Azure resources. A system managed identity is indicated when a connection string is the unique resource ID of an Azure AD-aware service or application. A user managed identity is specified through an "identity" property.
| Scenario | System managed identity | User managed identity (preview) | |-|-|| | [Indexer connections to supported Azure data sources](search-indexer-overview.md) | Yes | Yes |
-| [Azure Key Vault for customer-managed keys](search-security-manage-encryption-keys.md) | Yes | No |
-| [Debug sessions (hosted in Azure Storage)](cognitive-search-debug-session.md)| Yes | No |
-| [Enrichment cache (hosted in Azure Storage)](search-howto-incremental-index.md)| Yes <sup>1</sup>| No |
-| [Knowledge Store (hosted in Azure Storage)](knowledge-store-create-rest.md) | Yes <sup>2</sup>| No |
-| [Custom skills (hosted in Azure Functions or equivalent)](cognitive-search-custom-skill-interface.md) | Yes | No |
+| [Azure Key Vault for customer-managed keys](search-security-manage-encryption-keys.md) | Yes | Yes |
+| [Debug sessions (hosted in Azure Storage)](cognitive-search-debug-session.md) | Yes | No |
+| [Enrichment cache (hosted in Azure Storage)](search-howto-incremental-index.md)| Yes <sup>1, 2</sup>| Yes |
+| [Knowledge Store (hosted in Azure Storage)](knowledge-store-create-rest.md) | Yes <sup>2</sup>| Yes |
+| [Custom skills (hosted in Azure Functions or equivalent)](cognitive-search-custom-skill-interface.md) | Yes | Yes |
-<sup>1</sup> The Import data wizard doesn't currently accept a system managed identity connection string for incremental enrichment, but after the wizard completes, you can update the indexer JSON definition to include the connection string, and then rerun the indexer.
+<sup>1</sup> The Import data wizard doesn't currently accept a managed identity connection string for incremental enrichment, but after the wizard completes, you can update the connection string in the indexer JSON definition to specify the managed identity, and then rerun the indexer.
<sup>2</sup> If your indexer has an attached skillset that writes back to Azure Storage (for example, it creates a knowledge store or caches enriched content), a managed identity won't work if the storage account is behind a firewall or has IP restrictions. This is a known limitation that will be lifted when managed identity support for skillset scenarios becomes generally available. The solution is to use a full access connection string instead of a managed identity if Azure Storage is behind a firewall.
-Debug sessions, enrichment cache, and knowledge store are features that write to Blob Storage. Assign a system managed identity to the **Storage Blob Data Contributor** role to support these features.
+Debug sessions, enrichment cache, and knowledge store are features that write to Blob Storage. Assign a managed identity to the **Storage Blob Data Contributor** role to support these features.
-Knowledge store will also write to Table Storage. Assign a system managed identity to the **Storage Table Data Contributor** role to support table projections.
+Knowledge store will also write to Table Storage. Assign a managed identity to the **Storage Table Data Contributor** role to support table projections.
## Create a system managed identity
See [Create a search service with a system assigned managed identity (Azure CLI)
## Create a user managed identity (preview)
-A user-assigned managed identity is a resource on Azure. It's useful if you need more granularity in role assignments.
-
-Currently in Azure Cognitive Search, user managed identities are supported only for indexer data connections. You can create separate identities for different applications and scenarios that are related to indexer-based indexing.
+A user-assigned managed identity is a resource on Azure. It's useful if you need more granularity in role assignments because you can create separate identities for different applications and scenarios.
> [!IMPORTANT] >This feature is in public preview under [supplemental terms of use](https://azure.microsoft.com/support/legal/preview-supplemental-terms/).
If your Azure resource is behind a firewall, make sure there's an inbound rule t
+ For same-region connections to Azure Blob Storage or Azure Data Lake Storage Gen2, use the [trusted service exception](search-indexer-howto-access-trusted-service-exception.md) to admit requests.
-+ For all other resources and connections, [configure an IP firewall rule](search-indexer-howto-access-ip-restricted.md). See [Indexer access to content protected by Azure network security features](search-indexer-securing-resources.md) for more detail.
++ For all other resources and connections, [configure an IP firewall rule](search-indexer-howto-access-ip-restricted.md) that admits requests from Search. See [Indexer access to content protected by Azure network security features](search-indexer-securing-resources.md) for more detail. ## Assign a role
A managed identity must be paired with an Azure role that determines permissions
+ Data reader permissions are needed for indexer data connections and for accessing a customer-managed key in Azure Key Vault.
-+ Contributor (write) permissions are needed for AI enrichment features that use Azure Storage for hosting session data, caching, and long-term content storage. These features include: enrichment cache, knowledge store, debug session.
++ Contributor (write) permissions are needed for AI enrichment features that use Azure Storage for hosting debug session data, enrichment caching, and long-term content storage in a knowledge store. The following steps are for Azure Storage. If your resource is Cosmos DB or Azure SQL, the steps are similar.
The following steps are for Azure Storage. If your resource is Cosmos DB or Azur
## Connection string examples
-Once a managed identity is defined and given a role assignment, outbound connections use it in connection strings. Here are some examples of connection strings for various scenarios.
+Once a managed identity is defined for the search service and given a role assignment, outbound connections can be modified to use the unique resource ID of the other Azure resource. Here are some examples of connection strings for various scenarios.
[**Blob data source (system):**](search-howto-managed-identities-storage.md)
-An indexer data source includes a "credentials" property that determines how the connection is made to the data source. The following example shows a connection string that uses a system managed identity. Notice that the connection string doesn't include a container. In a data source definition, a container name is specified in the "container" property (not shown), not the connection string.
+An indexer data source includes a "credentials" property that determines how the connection is made to the data source. The following example shows a connection string specifying the unique resource ID of a storage account. Azure AD will authenticate the request using the system managed identity of the search service. Notice that the connection string doesn't include a container. In a data source definition, a container name is specified in the "container" property (not shown), not the connection string.
```json "credentials": {
An indexer data source includes a "credentials" property that determines how the
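A minimal sketch of what that credentials block typically looks like; the subscription, resource group, and storage account names are placeholders:

```json
"credentials": {
    "connectionString": "ResourceId=/subscriptions/{subscription-ID}/resourceGroups/{resource-group-name}/providers/Microsoft.Storage/storageAccounts/{storage-account-name};"
}
```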
[**Blob data source (user):**](search-howto-managed-identities-storage.md)
-A user-assigned managed identity is a preview feature. It's specified in an additional "identity" property, currently only supported for indexer data sources. You can use either the portal or the REST API preview version 2021-04-30-Preview to create an indexer data source that supports a user-assigned managed identity.
+A search request to Azure Storage can also be made under a user-assigned managed identity, currently in preview. The search service user identity is specified in the "identity" property. You can use either the portal or the REST API preview version 2021-04-30-Preview to set the identity.
```json "credentials": {
A user-assigned managed identity is a preview feature. It's specified in an addi
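As a rough sketch of how the "identity" property might sit alongside the credentials in a data source definition, here's an illustrative example. The exact identity type string and resource path format are assumptions based on the preview REST API and should be verified against the current reference; the placeholder names aren't values from this article:

```json
"credentials": {
    "connectionString": "ResourceId=/subscriptions/{subscription-ID}/resourceGroups/{resource-group-name}/providers/Microsoft.Storage/storageAccounts/{storage-account-name};"
},
"identity": {
    "@odata.type": "#Microsoft.Azure.Search.DataUserAssignedIdentity",
    "userAssignedIdentity": "/subscriptions/{subscription-ID}/resourceGroups/{resource-group-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}"
}
```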
[**Knowledge store:**](knowledge-store-create-rest.md)
-A knowledge store definition includes a connection string to Azure Storage. On Azure Storage, a knowledge store will create projections as blobs and tables. The connection string is a straightforward connection to Azure Storage. Notice that the string does not include containers or tables in the path. These are defined in the embedded projection definition, not the connection string.
+A knowledge store definition includes a connection string to Azure Storage. On Azure Storage, a knowledge store will create projections as blobs and tables. The connection string is the unique resource ID of your storage account. Notice that the string does not include containers or tables in the path. These are defined in the embedded projection definition, not the connection string.
```json "knowledgeStore": {
An indexer creates, uses, and remembers the container used for the cached enrich
[**Debug session:**](cognitive-search-debug-session.md)
-A debug session targets a container. Be sure to include the name of an existing container in the connection string. You can paste a string similar to the following example in the debug session that you start up in the portal.
+A debug session runs in the portal and takes a connection string when you start the session. You can paste a string similar to the following example.
```json "ResourceId=/subscriptions/{subscription-ID}/resourceGroups/{resource-group-name}/providers/Microsoft.Storage/storageAccounts/{storage-account-name}/{container-name};", ```
+[**Custom skill:**](cognitive-search-custom-skill-interface.md)
+
+A custom skill targets the endpoint of an Azure function or app hosting custom code. The endpoint is specified in the [custom skill definition](cognitive-search-custom-skill-web-api.md). The presence of the "authResourceId" tells the search service to connect using a managed identity, passing the application ID of the target function or app in the property.
+
+```json
+{
+ "@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
+ "description": "A custom skill that can identify positions of different phrases in the source text",
+ "uri": "https://contoso.count-things.com",
+ "authResourceId": "<Azure-AD-registered-application-ID>",
+ "batchSize": 4,
+ "context": "/document",
+ "inputs": [ ... ],
+ "outputs": [ ...]
+}
+```
+ ## See also + [Security overview](search-security-overview.md)
search Search Security Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/search/search-security-overview.md
Previously updated : 02/16/2022- Last updated : 03/25/2022+ # Security overview for Azure Cognitive Search
This article describes the security features in Azure Cognitive Search that prot
## Data flow (network traffic patterns)
-A search service is hosted on Azure and is typically accessed by client applications using public network connections. While that pattern is predominant, it's not the only traffic pattern that you need to care about. Understanding all points of entry and outbound traffic is necessary background for protecting your development and production environments.
+A Cognitive Search service is hosted on Azure and is typically accessed by client applications over public network connections. While that pattern is predominant, it's not the only traffic pattern that you need to care about. Understanding all points of entry as well as outbound traffic is necessary background for securing your development and production environments.
Cognitive Search has three basic network traffic patterns:
Inbound requests that target a search service endpoint consist of:
-+ Creating and managing objects
++ Creating and managing indexes, indexers, and other objects + Sending requests for indexing, running indexer jobs, executing skills + Querying an index
Independent of network security, all inbound requests must be authenticated. Key
Outbound requests from a search service to other applications are typically made by indexers for text-based indexing and some aspects of AI enrichment. Outbound requests include both read and write operations:
-+ Indexers connect to external data sources to pull in data for indexing.
-+ Indexers can also write to Azure Storage when creating knowledge stores, persisting cached enrichments, and persisting debug sessions.
-+ A custom skill runs external code that's hosted off-service. An indexer sends the request for external processing during skillset execution.
++ Search, on behalf of an indexer, connects to external data sources to read in data for indexing.++ Search, on behalf of an indexer, writes to Azure Storage when creating knowledge stores, persisting cached enrichments, and persisting debug sessions.++ A custom skill connects to an Azure function or app to run external code that's hosted off-service. The request for external processing is sent during skillset execution. + Search connects to Azure Key Vault for a customer-managed key used to encrypt and decrypt sensitive data.
-Outbound connections can be made using a full access connection string that includes a shared access key or a database login, or a managed identity if you're using Azure Active Directory.
+Outbound connections can be made using a resource's full access connection string that includes a key or a database login, or a managed identity if you're using Azure Active Directory.
-If your Azure resources are behind a firewall, you'll need to create rules that admit indexer or service requests. For resources protected by Azure Private Link, you can create a shared private link that an indexer uses to make its connection.
+If your Azure resource is behind a firewall, you'll need to create rules that admit search service requests. For resources protected by Azure Private Link, you can create a shared private link that an indexer uses to make its connection.
### Internal traffic
While this solution is the most secure, using additional services is an added co
## Authentication
-Once a request is admitted, it must still undergo authentication and authorization that determines whether the request is permitted.
+Once a request is admitted, it must still undergo authentication and authorization that determines whether the request is permitted. Cognitive Search supports two approaches:
-For inbound requests to the search service, authentication is on the request (not the calling app or user) through an [API key](search-security-api-keys.md), where the key is a string composed of randomly generated numbers and letters) that proves the request is from a trustworthy source. Keys are required on every request. Submission of a valid key is considered proof the request originates from a trusted entity.
++ [Key-based authentication](search-security-api-keys.md) is performed on the request (not the calling app or user) through an API key, where the key is a string composed of randomly generated numbers and letters that prove the request is from a trustworthy source. Keys are required on every request. Submission of a valid key is considered proof the request originates from a trusted entity.
-Alternatively, there's new support for Azure Active Directory authentication and role-based authorization, [currently in preview](search-security-rbac.md), that establishes the caller (and not the request) as the authenticated identity.
++ [Azure AD authentication (preview)](search-security-rbac.md) establishes the caller (and not the request) as the authenticated identity. An Azure role assignment determines the allowed operation.
-Outbound requests made by an indexer are subject to the authentication protocols supported by the external service. The indexer subservice in Cognitive Search can be made a trusted service on Azure, connecting to other services using a managed identity. For more information, see [Set up an indexer connection to a data source using a managed identity](search-howto-managed-identities-data-sources.md).
+Outbound requests made by an indexer are subject to the authentication protocols supported by the external service. A search service can be made a trusted service on Azure, connecting to other services using a system or user managed identity. For more information, see [Set up an indexer connection to a data source using a managed identity](search-howto-managed-identities-data-sources.md).
## Authorization
Cognitive Search provides different authorization models for content management
### Authorization for content management
-Authorization to content, and operations related to content, is either write access, as conferred through the [API key](search-security-api-keys.md) provided on the request. The API key is an authentication mechanism, but also authorizes access depending on the type of API key.
+If you're using key-based authentication, authorization on content operations is conferred through the type of [API key](search-security-api-keys.md) on the request:
+ Admin key (allows read-write access for create-read-update-delete operations on the search service), created when the service is provisioned
Authorization to content, and operations related to content, is either write acc
In application code, you specify the endpoint and an API key to allow access to content and operations. An endpoint might be the service itself, the indexes collection, a specific index, a documents collection, or a specific document. When chained together, the endpoint, the operation (for example, a create or update request), and the permission level (full or read-only rights based on the key) constitute the security formula that protects content and operations.
-> [!NOTE]
-> Authorization for data plane operations using Azure role-based access control (RBAC) is now in preview. You can use this preview capability if you want to [use role assignments instead of API keys](search-security-rbac.md).
+If you're using Azure AD authentication, [use role assignments instead of API keys](search-security-rbac.md) to establish who and what can read and write to your search service.
### Controlling access to indexes
-In Azure Cognitive Search, an individual index is not a securable object. Instead, access to an index is determined at the service layer (read or write access based on which API key you provide), along with the context of an operation.
+In Azure Cognitive Search, an individual index is generally not a securable object. As noted previously for key-based authentication, access to an index includes read or write permissions based on which API key you provide on the request, along with the context of an operation. In a query request, there is no concept of joining indexes or accessing multiple indexes simultaneously, so all requests target a single index by definition. As such, construction of the query request itself (a key plus a single target index) defines the security boundary.
-For read-only access, you can structure query requests to connect using a [query key](search-security-api-keys.md), and include the specific index used by your app. In a query request, there is no concept of joining indexes or accessing multiple indexes simultaneously so all requests target a single index by definition. As such, construction of the query request itself (a key plus a single target index) defines the security boundary.
+If you're using Azure roles, you can [set permissions on individual indexes](search-security-rbac.md#grant-access-to-a-single-index) as long as it's done programmatically.
-Administrator and developer access to indexes is undifferentiated: both need write access to create, delete, and update objects managed by the service. Anyone with an [admin key](search-security-api-keys.md) to your service can read, modify, or delete any index in the same service. For protection against accidental or malicious deletion of indexes, your in-house source control for code assets is the remedy for reversing an unwanted index deletion or modification. Azure Cognitive Search has failover within the cluster to ensure availability, but it does not store or execute your proprietary code used to create or load indexes.
+For key-based authentication scenarios, administrator and developer access to indexes is undifferentiated: both need write access to create, delete, and update the objects managed by the service. Anyone with an [admin key](search-security-api-keys.md) to your service can read, modify, or delete any index in the same service. For protection against accidental or malicious deletion of indexes, your in-house source control for code assets is the solution for reversing an unwanted index deletion or modification. Azure Cognitive Search has failover within the cluster to ensure availability, but it doesn't store or execute your proprietary code used to create or load indexes.
For multitenancy solutions requiring security boundaries at the index level, such solutions typically include a middle tier, which customers use to handle index isolation. For more information about the multitenant use case, see [Design patterns for multitenant SaaS applications and Azure Cognitive Search](search-modeling-multitenant-saas-applications.md).
For multitenancy solutions requiring security boundaries at the index level, suc
If you require granular, per-user control over search results, you can build security filters on your queries, returning documents associated with a given security identity.
-Conceptually equivalent to "row-level security", authorization to content within the index is not natively supported using predefined roles or role assignments that map to entities in Azure Active Directory. Any user permissions on data in external systems, such as Cosmos DB, do not transfer with that data as its being indexed by Cognitive Search.
+Conceptually equivalent to "row-level security", authorization to content within the index isn't natively supported using predefined roles or role assignments that map to entities in Azure Active Directory. Any user permissions on data in external systems, such as Cosmos DB, don't transfer with that data as it's being indexed by Cognitive Search.
Workarounds for solutions that require "row-level security" include creating a field in the data source that represents a security group or user identity, and then using filters in Cognitive Search to selectively trim search results of documents and content based on those identities. The following table describes two approaches for trimming search results of unauthorized content.
Workarounds for solutions that require "row-level security" include creating a f
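As an illustration, here's a sketch of a filtered query that trims results by a hypothetical `group_ids` collection field added to the index for this purpose; the field names and group values are illustrative assumptions:

```json
{
  "search": "*",
  "filter": "group_ids/any(g: search.in(g, 'group_a, group_b'))",
  "select": "title, content"
}
```

The filter keeps only documents whose `group_ids` field contains at least one of the caller's groups, so each user sees only documents their groups are authorized to view.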
Service Management operations are authorized through [Azure role-based access control (Azure RBAC)](../role-based-access-control/overview.md). Azure RBAC is an authorization system built on [Azure Resource Manager](../azure-resource-manager/management/overview.md) for provisioning of Azure resources.
-In Azure Cognitive Search, Resource Manager is used to create or delete the service, manage API keys, and scale the service. As such, Azure role assignments will determine who can perform those tasks, regardless of whether they are using the [portal](search-manage.md), [PowerShell](search-manage-powershell.md), or the [Management REST APIs](/rest/api/searchmanagement).
+In Azure Cognitive Search, Resource Manager is used to create or delete the service, manage API keys, and scale the service. As such, Azure role assignments will determine who can perform those tasks, regardless of whether they're using the [portal](search-manage.md), [PowerShell](search-manage-powershell.md), or the [Management REST APIs](/rest/api/searchmanagement).
[Three basic roles](search-security-rbac.md) are defined for search service administration. The role assignments can be made using any supported methodology (portal, PowerShell, and so forth) and are honored service-wide. The Owner and Contributor roles can perform a variety of administration functions. You can assign the Reader role to users who only view essential information.
Reliance on API key-based authentication means that you should have a plan for r
### Activity and diagnostic logs
-Cognitive Search does not log user identities so you can't refer to logs for information about a specific user. However, the service does log create-read-update-delete operations, which you might be able to correlate with other logs to understand the agency of specific actions.
+Cognitive Search doesn't log user identities so you can't refer to logs for information about a specific user. However, the service does log create-read-update-delete operations, which you might be able to correlate with other logs to understand the agency of specific actions.
Using alerts and the logging infrastructure in Azure, you can pick up on query volume spikes or other actions that deviate from expected workloads. For more information about setting up logs, see [Collect and analyze log data](monitor-azure-cognitive-search.md) and [Monitor query requests](search-monitor-queries.md). ### Certifications and compliance
-Azure Cognitive Search participates in regular audits, and has been certified against a number of global, regional, and industry-specific standards for both the public cloud and Azure Government. For the complete list, download the [**Microsoft Azure Compliance Offerings** whitepaper](https://azure.microsoft.com/resources/microsoft-azure-compliance-offerings/) from the official Audit reports page.
+Azure Cognitive Search participates in regular audits, and has been certified against many global, regional, and industry-specific standards for both the public cloud and Azure Government. For the complete list, download the [**Microsoft Azure Compliance Offerings** whitepaper](https://azure.microsoft.com/resources/microsoft-azure-compliance-offerings/) from the official Audit reports page.
For compliance, you can use [Azure Policy](../governance/policy/overview.md) to implement the high-security best practices of [Azure Security Benchmark](../security/benchmarks/introduction.md). Azure Security Benchmark is a collection of security recommendations, codified into security controls that map to key actions you should take to mitigate threats to services and data. There are currently 11 security controls, including [Network Security](../security/benchmarks/security-control-network-security.md), [Logging and Monitoring](../security/benchmarks/security-control-logging-monitoring.md), and [Data Protection](../security/benchmarks/security-control-data-protection.md) to name a few.
-Azure Policy is a capability built into Azure that helps you manage compliance for multiple standards, including those of Azure Security Benchmark. For well-known benchmarks, Azure Policy provides built-in definitions that provide both criteria as well as an actionable response that addresses non-compliance.
+Azure Policy is a capability built into Azure that helps you manage compliance for multiple standards, including those of Azure Security Benchmark. For well-known benchmarks, Azure Policy provides built-in definitions that provide both criteria and an actionable response that addresses non-compliance.
For Azure Cognitive Search, there's currently one built-in definition. It's for diagnostic logging. With this built-in, you can assign a policy that identifies any search service that is missing diagnostic logging, and then turns it on. For more information, see [Azure Policy Regulatory Compliance controls for Azure Cognitive Search](security-controls-policy.md).
sentinel Data Connectors Reference https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/sentinel/data-connectors-reference.md
See [Microsoft Defender for Cloud](#microsoft-defender-for-cloud).
> [!NOTE] > The Azure Information Protection (AIP) data connector uses the AIP audit logs (public preview) feature. As of **March 18, 2022**, we are sunsetting the AIP analytics and audit logs public preview, and moving forward will be using the [Microsoft 365 auditing solution](/microsoft-365/compliance/auditing-solutions-overview). Full retirement is scheduled for **September 30, 2022**. >
-> For more information, see [Removed and retired services](/azure/information-protection/removed-sunset-services.md#azure-information-protection-analytics).
+> For more information, see [Removed and retired services](/azure/information-protection/removed-sunset-services#azure-information-protection-analytics).
> ## Azure Key Vault
service-bus-messaging Service Bus Java How To Use Queues https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/service-bus-messaging/service-bus-java-how-to-use-queues.md
Title: Get started with Azure Service Bus queues (Java) description: This tutorial shows you how to send messages to and receive messages from Azure Service Bus queues using the Java programming language. Previously updated : 02/13/2021 Last updated : 03/24/2022 ms.devlang: java
If you are using Eclipse and created a Java console application, convert your Ja
</plugins> </build> <dependencies>
- <dependency>
- <groupId>com.azure</groupId>
- <artifactId>azure-core</artifactId>
- <version>1.13.0</version>
- </dependency>
<dependency> <groupId>com.azure</groupId> <artifactId>azure-messaging-servicebus</artifactId>
- <version>7.0.2</version>
+ <version>7.7.0</version>
</dependency> </dependencies> </project>
service-bus-messaging Service Bus Java How To Use Topics Subscriptions https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/service-bus-messaging/service-bus-java-how-to-use-topics-subscriptions.md
Title: Get started with Azure Service Bus topics (Java) description: This tutorial shows you how to send messages to Azure Service Bus topics and receive messages from topics' subscriptions using the Java programming language. Previously updated : 02/13/2021 Last updated : 03/24/2022 ms.devlang: java
If you are using Eclipse and created a Java console application, convert your Ja
</plugins> </build> <dependencies>
- <dependency>
- <groupId>com.azure</groupId>
- <artifactId>azure-core</artifactId>
- <version>1.13.0</version>
- </dependency>
<dependency> <groupId>com.azure</groupId> <artifactId>azure-messaging-servicebus</artifactId>
- <version>7.0.2</version>
+ <version>7.7.0</version>
</dependency> </dependencies> </project>
service-fabric Service Fabric Application Upgrade Tutorial Powershell https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/service-fabric/service-fabric-application-upgrade-tutorial-powershell.md
A monitored application upgrade can be performed using the managed or native API
With Service Fabric monitored rolling upgrades, the application administrator can configure the health evaluation policy that Service Fabric uses to determine if the application is healthy. In addition, the administrator can configure the action to be taken when the health evaluation fails (for example, doing an automatic rollback). This section walks through a monitored upgrade for one of the SDK samples that uses PowerShell.
-[Check this page for a training video that also walks you through an application upgrade:](/shows/building-microservices-applications-on-azure-service-fabric/upgrading-an-application.md)
--
+[Check this page for a training video that also walks you through an application upgrade:](/shows/building-microservices-applications-on-azure-service-fabric/upgrading-an-application)
> [!NOTE] > [ApplicationParameter](/dotnet/api/system.fabric.description.applicationdescription.applicationparameters#System_Fabric_Description_ApplicationDescription_ApplicationParameters)s are not preserved across an application upgrade. In order to preserve current application parameters, the user should get the parameters first and pass them into the upgrade API call like below: ```powershell
service-fabric Service Fabric Cluster Resource Manager Introduction https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/service-fabric/service-fabric-cluster-resource-manager-introduction.md
The Cluster Resource Manager is the system component that handles orchestration
2. Optimizing Your Environment 3. Helping with Other Processes
-[Check this page for a training video to understand how the Cluster Resource Manager works:](/shows/building-microservices-applications-on-azure-service-fabric/cluster-resource-manager.md)
+[Check this page for a training video to understand how the Cluster Resource Manager works:](/shows/building-microservices-applications-on-azure-service-fabric/cluster-resource-manager)
### What it isn't In traditional N-tier applications, there's always a [Load Balancer](https://en.wikipedia.org/wiki/Load_balancing_(computing)). Usually this was a Network Load Balancer (NLB) or an Application Load Balancer (ALB), depending on where it sat in the networking stack. Some load balancers are hardware-based, like F5's BigIP offering; others are software-based, such as Microsoft's NLB. In other environments, you might see something like HAProxy, nginx, Istio, or Envoy in this role. In these architectures, the job of load balancing is to ensure stateless workloads receive (roughly) the same amount of work. Strategies for balancing load varied. Some balancers would send each different call to a different server. Others provided session pinning/stickiness. More advanced balancers use actual load estimation or reporting to route a call based on its expected cost and current machine load.
service-fabric Service Fabric Common Questions https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/service-fabric/service-fabric-common-questions.md
Follow the [Service Fabric blog](https://techcommunity.microsoft.com/t5/azure-se
## Next steps
-Learn about [core Service Fabric concepts](service-fabric-technical-overview.md) and [best practices](./service-fabric-best-practices-security.md)
+Learn about [Service Fabric runtime concepts and best practices](/shows/building-microservices-applications-on-azure-service-fabric/run-time-concepts)
service-fabric Service Fabric Package Apps https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/service-fabric/service-fabric-package-apps.md
The folders are named to match the **Name** attributes of each corresponding ele
Typical scenarios for using **SetupEntryPoint** are when you need to run an executable before the service starts or you need to perform an operation with elevated privileges. For example:
-* Setting up and initializing environment variables that the service executable needs. It is not limited to only executables written via the Service Fabric programming models. For example, npm.exe needs some environment variables configured for deploying a node.js application.
+* Setting up and initializing environment variables that the service executable needs. It is not limited to only executables written via the Service Fabric programming models. For example, npm.exe needs some environment variables configured for deploying a Node.js application.
* Setting up access control by installing security certificates. For more information on how to configure the **SetupEntryPoint**, see [Configure the policy for a service setup entry point](service-fabric-application-runas-security.md)
storage Storage Files Identity Auth Active Directory Domain Service Enable https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/storage/files/storage-files-identity-auth-active-directory-domain-service-enable.md
# Enable Azure Active Directory Domain Services authentication on Azure Files
-[Azure Files](storage-files-introduction.md) supports identity-based authentication over Server Message Block (SMB) through two types of Domain
+[Azure Files](storage-files-introduction.md) supports identity-based authentication over Server Message Block (SMB) through two types of Domain
If you are new to Azure file shares, we recommend reading our [planning guide](storage-files-planning.md) before reading the following series of articles.
The following diagram illustrates the end-to-end workflow for enabling Azure AD
By default, Azure AD DS authentication uses Kerberos RC4 encryption. To use Kerberos AES256 instead, follow these steps:
-As an Azure AD DS user with the required permissions (typically, members of the **AAD DC Administrators** group will have the necessary permissions, open the Azure cloud shell.
+As an Azure AD DS user with the required permissions (typically, members of the **AAD DC Administrators** group will have the necessary permissions), open the Azure cloud shell.
Execute the following commands:
storage Storage Files Prevent File Share Deletion https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/storage/files/storage-files-prevent-file-share-deletion.md
When you initially enable soft delete, we recommend using a small retention peri
## Next steps To learn how to enable and use soft delete, continue to [Enable soft delete](storage-files-enable-soft-delete.md).+
+To learn how to prevent a storage account from being deleted or modified, see [Apply an Azure Resource Manager lock to a storage account](../common/lock-account-resource.md).
+
+To learn how to apply locks to resources and resource groups, see [Lock resources to prevent unexpected changes](../../azure-resource-manager/management/lock-resources.md).
stream-analytics Debug User Defined Functions https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/stream-analytics/debug-user-defined-functions.md
exports.data = methods;
## Install debug support
-To debug, you must [download](https://nodejs.org/en/download/) and install **node.js**. Install the correct version according to the platform you're using. After you install the node.js runtime, restart Visual Studio Code to implement the changes.
+To debug, you must [download](https://nodejs.org/en/download/) and install **node.js**. Install the correct version according to the platform you're using. After you install the Node.js runtime, restart Visual Studio Code to implement the changes.
-Select **Run and Debug** or press **CTRL + SHIFT + D** to start debugging. A combo box appears where you can select **node.js** as the runtime. If you only have node.js installed, it is used by default. You should be able to step through the code and into the satellite file if needed with F11.
+Select **Run and Debug** or press **CTRL + SHIFT + D** to start debugging. A combo box appears where you can select **node.js** as the runtime. If you only have Node.js installed, it is used by default. You should be able to step through the code and into the satellite file if needed with F11.
> [!div class="mx-imgBorder"] > ![Stream Analytics run and debug udf](./media/debug-user-defined-functions/run-debug-udf.png)
exports.data = methods;
> [!div class="mx-imgBorder"] > ![Code added to UDA](./media/debug-user-defined-functions/uda-expose-methods.png)
-Select **Run and Debug** or press **CTRL + SHIFT + D** to start debugging. A combo box appears where you can select **node.js** as the runtime. If you only have node.js installed, it is used by default. You should be able to step through the code and into the satellite file if needed with F11.
+Select **Run and Debug** or press **CTRL + SHIFT + D** to start debugging. A combo box appears where you can select **node.js** as the runtime. If you only have Node.js installed, it is used by default. You should be able to step through the code and into the satellite file if needed with F11.
> [!div class="mx-imgBorder"] > ![Stream Analytics run and debug uda](./media/debug-user-defined-functions/run-debug-uda.png)
stream-analytics Machine Learning Udf https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/stream-analytics/machine-learning-udf.md
Complete the following steps before you add a machine learning model as a functi
1. Use Azure Machine Learning to [deploy your model as a web service](../machine-learning/how-to-deploy-and-where.md).
-2. Your machine learning endpoint must have an associated [swagger](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-advanced-entry-script) that helps Stream Analytics understand the schema of the input and output. You can use this [sample swagger definition](https://github.com/Azure/azure-stream-analytics/blob/master/Samples/AzureML/asa-mlswagger.json) as a reference to ensure you have set it up correctly.
+2. Your machine learning endpoint must have an associated [swagger](../machine-learning/how-to-deploy-advanced-entry-script.md) that helps Stream Analytics understand the schema of the input and output. You can use this [sample swagger definition](https://github.com/Azure/azure-stream-analytics/blob/master/Samples/AzureML/asa-mlswagger.json) as a reference to ensure you have set it up correctly.
3. Make sure your web service accepts and returns JSON serialized data.
FROM input
WHERE <model-specific-data-structure> is not null ```
-If your input data sent to the ML UDF is inconsistent with the schema that is expected, the endpoint will return a response with error code 400, which will cause your Stream Analytics job to go to a failed state. It is recommended that you [enable resource logs](https://docs.microsoft.com/azure/stream-analytics/stream-analytics-job-diagnostic-logs#send-diagnostics-to-azure-monitor-logs) for your job, which will enable you to easily debug and troubleshoot such problems. Therefore, it is strongly recommended that you:
+If the input data sent to the ML UDF is inconsistent with the expected schema, the endpoint returns a response with error code 400, which causes your Stream Analytics job to go to a failed state. It is recommended that you [enable resource logs](stream-analytics-job-diagnostic-logs.md#send-diagnostics-to-azure-monitor-logs) for your job so that you can easily debug and troubleshoot such problems. It is also strongly recommended that you:
- Validate input to your ML UDF is not null
- Validate the type of every field that is an input to your ML UDF to ensure it matches what the endpoint expects
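For illustration, here's a hedged sketch of such a validation step. The function alias `scoreModel` and the input column `reading` are assumptions for this example, not names from the article; `TRY_CAST` is used so that values whose type doesn't match what the endpoint expects are dropped instead of failing the job.

```SQL
-- Minimal sketch: 'scoreModel' is a hypothetical ML function alias and
-- 'reading' is an illustrative input column; adjust both to your own schema.
WITH ValidatedInput AS (
    SELECT TRY_CAST(reading AS float) AS reading
    FROM input
    WHERE reading IS NOT NULL        -- drop null inputs
)
SELECT reading, udf.scoreModel(reading) AS score
INTO output
FROM ValidatedInput
WHERE reading IS NOT NULL            -- drop values whose cast failed
```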
After you have deployed your web service, you send sample request with varying b
At optimal scaling, your Stream Analytics job should be able to send multiple parallel requests to your web service and get a response within a few milliseconds. The latency of the web service's response can directly impact the latency and performance of your Stream Analytics job. If the call from your job to the web service takes a long time, you will likely see an increase in watermark delay and may also see an increase in the number of backlogged input events.
-You can achieve low latency by ensuring that your Azure Kubernetes Service (AKS) cluster has been provisioned with the [right number of nodes and replicas](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-azure-kubernetes-service?tabs=python#autoscaling). It's critical that your web service is highly available and returns successful responses. If your job receives an error that is retriable such as service unavailable response (503), it will automaticaly retry with exponential back off. If your job receives one of these errors as a response from the endpoint, the job will go to a failed state.
+You can achieve low latency by ensuring that your Azure Kubernetes Service (AKS) cluster has been provisioned with the [right number of nodes and replicas](../machine-learning/how-to-deploy-azure-kubernetes-service.md?tabs=python#autoscaling). It's critical that your web service is highly available and returns successful responses. If your job receives a retriable error, such as a service unavailable (503) response, it will automatically retry with exponential backoff. If your job receives one of the following errors as a response from the endpoint, the job will go to a failed state.
* Bad Request (400)
* Conflict (409)
* Not Found (404)
stream-analytics Stream Analytics Use Reference Data https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/stream-analytics/stream-analytics-use-reference-data.md
Title: Use reference data for lookups in Azure Stream Analytics
-description: This article describes how to use reference data to lookup or correlate data in an Azure Stream Analytics job's query design.
+description: This article describes how to use reference data to look up or correlate data in an Azure Stream Analytics job's query design.
Last updated 06/25/2021
-# Using reference data for lookups in Stream Analytics
+# Use reference data for lookups in Stream Analytics
-Reference data (also known as a lookup table) is a finite data set that is static or slowly changing in nature, used to perform a lookup or to augment your data streams. For example, in an IoT scenario, you could store metadata about sensors (which don't change often) in reference data and join it with real time IoT data streams. Azure Stream Analytics loads reference data in memory to achieve low latency stream processing. To make use of reference data in your Azure Stream Analytics job, you will generally use a [Reference Data Join](/stream-analytics-query/reference-data-join-azure-stream-analytics) in your query.
+Reference data is a finite dataset that's static or slowly changing in nature. It's used to perform a lookup or to augment your data streams. Reference data is also known as a lookup table.
+
+Take an IoT scenario as an example. You could store metadata about sensors, which don't change often, in reference data. Then you could join it with real-time IoT data streams.
+
+Azure Stream Analytics loads reference data in memory to achieve low-latency stream processing. To make use of reference data in your Stream Analytics job, you'll generally use a [reference data join](/stream-analytics-query/reference-data-join-azure-stream-analytics) in your query.
+
+## Example
+
+You can have a real-time stream of events generated when cars pass a tollbooth. The tollbooth can capture the license plates in real time. That data can join with a static dataset that has registration details to identify license plates that have expired.
-## Example
-You can have a real time stream of events generated when cars pass a toll booth. The toll booth can capture the license plate in real time and join with a static dataset that has registration details to identify license plates that have expired.
-
```SQL
SELECT I1.EntryTime, I1.LicensePlate, I1.TollId, R.RegistrationId
FROM Input1 I1 TIMESTAMP BY EntryTime
JOIN Registration R ON I1.LicensePlate = R.LicensePlate
WHERE R.Expired = '1'
-```
+```
+
+Stream Analytics supports Azure Blob Storage and Azure SQL Database as the storage layer for reference data. You can also transform or copy reference data to Blob Storage from Azure Data Factory to use [cloud-based and on-premises data stores](../data-factory/copy-activity-overview.md).
-Stream Analytics supports Azure Blob storage and Azure SQL Database as the storage layer for Reference Data. You can also transform and/or copy reference data to Blob storage from Azure Data Factory to use [any number of cloud-based and on-premises data stores](../data-factory/copy-activity-overview.md).
+## Azure Blob Storage
-## Azure Blob storage
+Reference data is modeled as a sequence of blobs in ascending order of the date/time specified in the blob name. Blobs can only be added to the end of the sequence by using a date/time *greater* than the one specified by the last blob in the sequence. Blobs are defined in the input configuration.
-Reference data is modeled as a sequence of blobs (defined in the input configuration) in ascending order of the date/time specified in the blob name. It **only** supports adding to the end of the sequence by using a date/time **greater** than the one specified by the last blob in the sequence. For more information, see [Use reference data from a Blob Storage for an Azure Stream Analytics job](data-protection.md).
+For more information, see [Use reference data from Blob Storage for a Stream Analytics job](data-protection.md).
### Configure blob reference data
-To configure your reference data, you first need to create an input that is of type **Reference Data**. The table below explains each property that you will need to provide while creating the reference data input with its description:
+To configure your reference data, you first need to create an input that's of the type *reference data*. The following table explains each property you need to provide when you create the reference data input with its description.
-|**Property Name** |**Description** |
+|Property name |Description |
|||
-|Input Alias | A friendly name that will be used in the job query to reference this input. |
-|Storage Account | The name of the storage account where your blobs are located. If it's in the same subscription as your Stream Analytics Job, you can select it from the drop-down. |
-|Storage Account Key | The secret key associated with the storage account. This gets automatically populated if the storage account is in the same subscription as your Stream Analytics job. |
-|Storage Container | Containers provide a logical grouping for blobs stored in the Microsoft Azure Blob service. When you upload a blob to the Blob service, you must specify a container for that blob. |
-|Path Pattern | This is a required property that is used to locate your blobs within the specified container. Within the path, you may choose to specify one or more instances of the following 2 variables:<BR>{date}, {time}<BR>Example 1: products/{date}/{time}/product-list.csv<BR>Example 2: products/{date}/product-list.csv<BR>Example 3: product-list.csv<BR><br> If the blob doesn't exist in the specified path, the Stream Analytics job will wait indefinitely for the blob to become available. |
-|Date Format [optional] | If you have used {date} within the Path Pattern that you specified, then you can select the date format in which your blobs are organized from the drop-down of supported formats.<BR>Example: YYYY/MM/DD, MM/DD/YYYY, etc. |
-|Time Format [optional] | If you have used {time} within the Path Pattern that you specified, then you can select the time format in which your blobs are organized from the drop-down of supported formats.<BR>Example: HH, HH/mm, or HH-mm. |
-|Event Serialization Format | To make sure your queries work the way you expect, Stream Analytics needs to know which serialization format you're using for incoming data streams. For Reference Data, the supported formats are CSV and JSON. |
+|Input alias | A friendly name used in the job query to reference this input. |
+|Storage account | The name of the storage account where your blobs are located. If it's in the same subscription as your Stream Analytics job, select it from the dropdown list. |
+|Storage account key | The secret key associated with the storage account. This key is automatically populated if the storage account is in the same subscription as your Stream Analytics job. |
+|Storage container | Containers provide a logical grouping for blobs stored in Blob Storage. When you upload a blob to Blob Storage, you must specify a container for that blob. |
+|Path pattern | This required property is used to locate your blobs within the specified container. Within the path, you might choose to specify one or more instances of the variables {date} and {time}.<BR>Example 1: products/{date}/{time}/product-list.csv<BR>Example 2: products/{date}/product-list.csv<BR>Example 3: product-list.csv<BR><br> If the blob doesn't exist in the specified path, the Stream Analytics job waits indefinitely for the blob to become available. |
+|Date format [optional] | If you used {date} within the path pattern you specified, select the date format in which your blobs are organized from the dropdown list of supported formats.<BR>Example: YYYY/MM/DD or MM/DD/YYYY |
+|Time format [optional] | If you used {time} within the path pattern you specified, select the time format in which your blobs are organized from the dropdown list of supported formats.<BR>Example: HH, HH/mm, or HH-mm |
+|Event serialization format | To make sure your queries work the way you expect, Stream Analytics needs to know which serialization format you're using for incoming data streams. For reference data, the supported formats are CSV and JSON. |
|Encoding | UTF-8 is the only supported encoding format at this time. |

### Static reference data
-If your reference data is not expected to change, then support for static reference data is enabled by specifying a static path in the input configuration. Azure Stream Analytics picks up the blob from the specified path. {date} and {time} substitution tokens aren't required. Because reference data is immutable in Stream Analytics, overwriting a static reference data blob is not recommended.
+Your reference data might not be expected to change. To enable support for static reference data, specify a static path in the input configuration.
+
+Stream Analytics picks up the blob from the specified path. The {date} and {time} substitution tokens aren't required. Because reference data is immutable in Stream Analytics, overwriting a static reference data blob isn't recommended.
### Generate reference data on a schedule
-If your reference data is a slowly changing data set, then support for refreshing reference data is enabled by specifying a path pattern in the input configuration using the {date} and {time} substitution tokens. Stream Analytics picks up the updated reference data definitions based on this path pattern. For example, a pattern of `sample/{date}/{time}/products.csv` with a date format of **"YYYY-MM-DD"** and a time format of **"HH-mm"** instructs Stream Analytics to pick up the updated blob `sample/2015-04-16/17-30/products.csv` at 5:30 PM on April 16th, 2015 UTC time zone.
+Your reference data might be a slowly changing dataset. To refresh reference data, specify a path pattern in the input configuration by using the {date} and {time} substitution tokens. Stream Analytics picks up the updated reference data definitions based on this path pattern.
+
+For example, a pattern of `sample/{date}/{time}/products.csv` with a date format of YYYY-MM-DD and a time format of HH-mm instructs Stream Analytics to pick up the updated blob `sample/2015-04-16/17-30/products.csv` on April 16, 2015, at 5:30 PM UTC.
-Azure Stream Analytics automatically scans for refreshed reference data blobs at a one minute interval. If a blob with timestamp 10:30:00 is uploaded with a small delay (for example, 10:30:30), you will notice a small delay in Stream Analytics job referencing this blob. To avoid such scenarios, it is recommended to upload the blob earlier than the target effective time (10:30:00 in this example) to allow the Stream Analytics job enough time to discover and load it in memory and perform operations.
+Stream Analytics automatically scans for refreshed reference data blobs at a one-minute interval. A blob with the timestamp 10:30:00 might be uploaded with a small delay, for example, 10:30:30. You'll notice a small delay in the Stream Analytics job referencing this blob.
+
+To avoid such scenarios, upload the blob earlier than the target effective time, which is 10:30:00 in this example. The Stream Analytics job now has enough time to discover and load the blob in memory and perform operations.
> [!NOTE]
-> Currently Stream Analytics jobs look for the blob refresh only when the machine time advances to the time encoded in the blob name. For example, the job will look for `sample/2015-04-16/17-30/products.csv` as soon as possible but no earlier than 5:30 PM on April 16th, 2015 UTC time zone. It will *never* look for a blob with an encoded time earlier than the last one that is discovered.
->
-> For example, once the job finds the blob `sample/2015-04-16/17-30/products.csv` it will ignore any files with an encoded date earlier than 5:30 PM April 16th, 2015 so if a late arriving `sample/2015-04-16/17-25/products.csv` blob gets created in the same container the job will not use it.
->
-> Likewise if `sample/2015-04-16/17-30/products.csv` is only produced at 10:03 PM April 16th, 2015 but no blob with an earlier date is present in the container, the job will use this file starting at 10:03 PM April 16th, 2015 and use the previous reference data until then.
->
-> An exception to this is when the job needs to re-process data back in time or when the job is first started. At start time the job is looking for the most recent blob produced before the job start time specified. This is done to ensure that there is a **non-empty** reference data set when the job starts. If one cannot be found, the job displays the following diagnostic: `Initializing input without a valid reference data blob for UTC time <start time>`.
+> Currently, Stream Analytics jobs look for the blob refresh only when the machine time advances to the time encoded in the blob name. For example, the job looks for `sample/2015-04-16/17-30/products.csv` as soon as possible but no earlier than April 16, 2015, at 5:30 PM UTC. It will *never* look for a blob with an encoded time earlier than the last one that's discovered.
+>
+> For example, after the job finds the blob `sample/2015-04-16/17-30/products.csv`, it ignores any files with an encoded date earlier than April 16, 2015, at 5:30 PM. If a late-arriving `sample/2015-04-16/17-25/products.csv` blob gets created in the same container, the job won't use it.
+>
+> In another example, `sample/2015-04-16/17-30/products.csv` is only produced on April 16, 2015, at 10:03 PM, but no blob with an earlier date is present in the container. Then the job uses this file starting on April 16, 2015, at 10:03 PM and uses the previous reference data until then.
+>
+> An exception to this behavior is when the job needs to reprocess data back in time or when the job is first started.
+
+At start time, the job looks for the most recent blob produced before the job start time specified. This behavior ensures there's a *non-empty* reference dataset when the job starts. If one can't be found, the job displays the following diagnostic: `Initializing input without a valid reference data blob for UTC time <start time>`.
-When a reference data set is refreshed, a diagnostic log will be generated: `Loaded new reference data from <blob path>`. Multiple reasons may require a job to reload a previous (past) reference data set, most often to reprocess past data. That same diagnostic log will be generated then. This doesn't imply that current stream data will use past reference data.
+When a reference dataset is refreshed, a diagnostic log is generated: `Loaded new reference data from <blob path>`. For many reasons, a job might need to reload a previous reference dataset. Most often, the reason is to reprocess past data. The same diagnostic log is generated at that time. This action doesn't imply that current stream data will use past reference data.
-[Azure Data Factory](https://azure.microsoft.com/documentation/services/data-factory/) can be used to orchestrate the task of creating the updated blobs required by Stream Analytics to update reference data definitions. Data Factory is a cloud-based data integration service that orchestrates and automates the movement and transformation of data. Data Factory supports [connecting to a large number of cloud based and on-premises data stores](../data-factory/copy-activity-overview.md) and moving data easily on a regular schedule that you specify. For more information and step by step guidance on how to set up a Data Factory pipeline to generate reference data for Stream Analytics which refreshes on a pre-defined schedule, check out this [GitHub sample](https://github.com/Azure/Azure-DataFactory/tree/master/SamplesV1/ReferenceDataRefreshForASAJobs).
+[Azure Data Factory](https://azure.microsoft.com/documentation/services/data-factory/) can be used to orchestrate the task of creating the updated blobs required by Stream Analytics to update reference data definitions.
+
+Data Factory is a cloud-based data integration service that orchestrates and automates the movement and transformation of data. Data Factory supports [connecting to a large number of cloud-based and on-premises data stores](../data-factory/copy-activity-overview.md). It can move data easily on a regular schedule that you specify.
+
+For more information on how to set up a Data Factory pipeline to generate reference data for Stream Analytics that refreshes on a predefined schedule, see this [GitHub sample](https://github.com/Azure/Azure-DataFactory/tree/master/SamplesV1/ReferenceDataRefreshForASAJobs).
### Tips on refreshing blob reference data
-1. Do not overwrite reference data blobs as they are immutable.
-2. The recommended way to refresh reference data is to:
- * Use {date}/{time} in the path pattern
- * Add a new blob using the same container and path pattern defined in the job input
- * Use a date/time **greater** than the one specified by the last blob in the sequence.
-3. Reference data blobs are **not** ordered by the blob's "Last Modified" time but only by the time and date specified in the blob name using the {date} and {time} substitutions.
-3. To avoid having to list large number of blobs, consider deleting very old blobs for which processing will no longer be done. Please note that ASA might go have to reprocess a small amount in some scenarios like a restart.
+- Don't overwrite reference data blobs because they're immutable.
+- The recommended way to refresh reference data is to:
+ * Use {date}/{time} in the path pattern.
+ * Add a new blob by using the same container and path pattern defined in the job input.
+ * Use a date/time *greater* than the one specified by the last blob in the sequence.
+- Reference data blobs are *not* ordered by the blob's **Last Modified** time. They're only ordered by the date and time specified in the blob name using the {date} and {time} substitutions.
+- To avoid having to list a large number of blobs, delete old blobs for which processing will no longer be done. Stream Analytics might have to reprocess a small amount in some scenarios, like a restart.
## Azure SQL Database
-Azure SQL Database reference data is retrieved by your Stream Analytics job and is stored as a snapshot in memory for processing. The snapshot of your reference data is also stored in a container in a storage account that you specify in the configuration settings. The container is auto-created when the job starts. If the job is stopped or enters a failed state, the auto-created containers are deleted when the job is restarted.
+Your Stream Analytics job retrieves SQL Database reference data and stores it as a snapshot in memory for processing. The snapshot of your reference data is also stored in a container in a storage account. You specify the storage account in the configuration settings.
+
+The container is auto-created when the job starts. If the job stops or enters a failed state, the auto-created containers are deleted when the job restarts.
+
+If your reference data is a slowly changing dataset, you need to periodically refresh the snapshot that's used in your job.
-If your reference data is a slowly changing data set, you need to periodically refresh the snapshot that is used in your job. Stream Analytics allows you to set a refresh rate when you configure your Azure SQL Database input connection. The Stream Analytics runtime will query your Azure SQL Database at the interval specified by the refresh rate. The fastest refresh rate supported is once per minute. For each refresh, Stream Analytics stores a new snapshot in the storage account provided.
+With Stream Analytics, you can set a refresh rate when you configure your SQL Database input connection. The Stream Analytics runtime queries your SQL Database instance at the interval specified by the refresh rate. The fastest refresh rate supported is once per minute. For each refresh, Stream Analytics stores a new snapshot in the storage account provided.
-Stream Analytics provides two options for querying your Azure SQL Database. A snapshot query is mandatory and must be included in each job. Stream Analytics runs the snapshot query periodically based on your refresh interval and uses the result of the query (the snapshot) as the reference data set. The snapshot query should fit most scenarios, but if you run into performance issues with large data sets and fast refresh rates, you can use the delta query option. Queries that take more than 60 seconds to return reference data set will result in a timeout.
+Stream Analytics provides two options for querying your SQL Database instance. A snapshot query is mandatory and must be included in each job. Stream Analytics runs the snapshot query periodically based on your refresh interval. It uses the result of the query (the snapshot) as the reference dataset.
-With the delta query option, Stream Analytics runs the snapshot query initially to get a baseline reference data set. After, Stream Analytics runs the delta query periodically based on your refresh interval to retrieve incremental changes. These incremental changes are continually applied to the reference data set to keep it updated. Using delta query may help reduce storage cost and network I/O operations.
+The snapshot query should fit most scenarios. If you run into performance issues with large datasets and fast refresh rates, use the delta query option. Queries that take more than 60 seconds to return a reference dataset result in a timeout.
-### Configure SQL Database reference
+With the delta query option, Stream Analytics runs the snapshot query initially to get a baseline reference dataset. Afterwards, Stream Analytics runs the delta query periodically based on your refresh interval to retrieve incremental changes. These incremental changes are continually applied to the reference dataset to keep it updated. Using the delta query option might help reduce storage cost and network I/O operations.
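As a rough sketch, the two query options might look like the following. The table `dbo.DeviceData`, its columns, and the period columns `ValidFrom`/`ValidTo` are assumptions for illustration, and the exact parameter the job supplies for the delta query (shown here as `@deltaStartTime`) should be verified against the SQL Database reference data documentation. In the input configuration, the two statements go into the separate snapshot query and delta query fields.

```SQL
-- Snapshot query (mandatory): returns the full reference dataset.
SELECT DeviceId, DeviceName
FROM dbo.DeviceData

-- Delta query (optional) sketch: assumes dbo.DeviceData is a temporal table and
-- that the job supplies @deltaStartTime; treat the parameter name and the period
-- column names as assumptions to confirm in the delta query documentation.
SELECT DeviceId, DeviceName
FROM dbo.DeviceData FOR SYSTEM_TIME ALL
WHERE ValidFrom > @deltaStartTime OR ValidTo > @deltaStartTime
```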
-To configure your SQL Database reference data, you first need to create **Reference Data** input. The table below explains each property that you will need to provide while creating the reference data input with its description. For more information, see [Use reference data from a SQL Database for an Azure Stream Analytics job](sql-reference-data.md).
+### Configure SQL Database reference data
-You can use [Azure SQL Managed Instance](../azure-sql/managed-instance/sql-managed-instance-paas-overview.md) as a reference data input. You have to [configure public endpoint in SQL Managed Instance](../azure-sql/managed-instance/public-endpoint-configure.md) and then manually configure the following settings in Azure Stream Analytics. Azure virtual machine running SQL Server with a database attached is also supported by manually configuring the settings below.
+To configure your SQL Database reference data, you first need to create reference data input. The following table explains each property you need to provide when you create the reference data input with its description. For more information, see [Use reference data from a SQL Database for a Stream Analytics job](sql-reference-data.md).
-|**Property Name**|**Description** |
+You can use [Azure SQL Managed Instance](../azure-sql/managed-instance/sql-managed-instance-paas-overview.md) as a reference data input. You must [configure a public endpoint in SQL Managed Instance](../azure-sql/managed-instance/public-endpoint-configure.md). Then you manually configure the following settings in Stream Analytics. An Azure virtual machine running SQL Server with a database attached is also supported by manually configuring these settings.
+
+|Property name|Description |
|||
-|Input alias|A friendly name that will be used in the job query to reference this input.|
-|Subscription|Choose your subscription|
-|Database|The Azure SQL Database that contains your reference data. For SQL Managed Instance, it is required to specify the port 3342. For example, *sampleserver.public.database.windows.net,3342*|
-|Username|The username associated with your Azure SQL Database.|
-|Password|The password associated with your Azure SQL Database.|
-|Refresh periodically|This option allows you to choose a refresh rate. Choosing "On" will allow you to specify the refresh rate in DD:HH:MM.|
-|Snapshot query|This is the default query option that retrieves the reference data from your SQL Database.|
-|Delta query|For advanced scenarios with large data sets and a short refresh rate, choose to add a delta query.|
+|Input alias|A friendly name used in the job query to reference this input.|
+|Subscription|Your subscription.|
+|Database|The SQL Database instance that contains your reference data. For SQL Managed Instance, you must specify the port 3342. An example is *sampleserver.public.database.windows.net,3342*.|
+|Username|The username associated with your SQL Database instance.|
+|Password|The password associated with your SQL Database instance.|
+|Refresh periodically|This option allows you to select a refresh rate. Select **On** to specify the refresh rate in DD:HH:MM.|
+|Snapshot query|This default query option retrieves the reference data from your SQL Database instance.|
+|Delta query|For advanced scenarios with large datasets and a short refresh rate, add a delta query.|
## Size limitation
-It is recommended to use reference datasets which are less than 300 MB for best performance. Reference datasets 5 GB or lower is supported in jobs with 6 SUs or more. Using a very large reference data may impact end-to-end latency of your job. As the complexity of query increases to include stateful processing, such as windowed aggregates, temporal joins and temporal analytic functions, it is expected that the maximum supported size of reference data decreases. If Azure Stream Analytics cannot load the reference data and perform complex operations, the job will run out of memory and fail. In such cases, SU % Utilization metric will reach 100%.
+Use reference datasets that are less than 300 MB for best performance. Reference datasets 5 GB or lower are supported in jobs with six streaming units or more. Using a large reference dataset might affect end-to-end latency of your job.
+
+Query complexity can increase to include stateful processing such as windowed aggregates, temporal joins, and temporal analytic functions. When complexity increases, the maximum supported size of reference data decreases.
-|**Number of Streaming Units** |**Recommended Size** |
+If Stream Analytics can't load the reference data and perform complex operations, the job runs out of memory and fails. In such cases, the streaming unit percent utilization metric will reach 100%.
+
+|Number of streaming units |Recommended size |
|||
|1 |50 MB or lower |
|3 |150 MB or lower |
-|6 and beyond |5 GB or lower. |
+|6 and beyond |5 GB or lower |
+
+Support for compression isn't available for reference data. For reference datasets larger than 300 MB, use SQL Database as the source with the [delta query](./sql-reference-data.md#delta-query) option for optimal performance. If the delta query option isn't used in such scenarios, you'll see spikes in the watermark delay metric every time the reference dataset is refreshed.
-Support for compression is not available for reference data. For reference datasets larger than 300 MB, it is recommended to use Azure SQL Database as the source with [delta query](./sql-reference-data.md#delta-query) option for optimal performance. If delta query is not used in such scenarios, you will see in spikes in watermark delay metric every time the reference dataset is refreshed.
+## Join multiple reference datasets in a job
-## Joining multiple reference datasets in a job
-You can join only one stream input with one reference data input in a single step of your query. However, you can join multiple reference datasets by breaking down your query into multiple steps. An example is shown below.
+You can join only one stream input with one reference data input in a single step of your query. To join multiple reference datasets, break down your query into multiple steps. Here's an example:
```SQL With Step1 as (
SELECT *
INTO output FROM Step1 JOIN refData2 ON refData2.Desc = Step1.Desc
-```
+```
## IoT Edge jobs
-Only local reference data is supported for Stream Analytics edge jobs. When a job is deployed to IoT Edge device, it loads reference data from the user defined file path. Have a reference data file ready on the device. For a Windows container, put the reference data file on the local drive and share the local drive with the Docker container. For a Linux container, create a Docker volume and populate the data file to the volume.
+Only local reference data is supported for Stream Analytics edge jobs. When a job is deployed to an IoT Edge device, it loads reference data from the user-defined file path. Have a reference data file ready on the device.
-Reference data on IoT Edge update is triggered by a deployment. Once triggered, the Stream Analytics module picks the updated data without stopping the running job.
+For a Windows container, put the reference data file on the local drive and share the local drive with the Docker container. For a Linux container, create a Docker volume and populate the data file to the volume.
-There are two ways to update the reference data:
+Reference data on an IoT Edge update is triggered by a deployment. After it's triggered, the Stream Analytics module picks the updated data without stopping the running job.
-* Update reference data path in your Stream Analytics job from Azure portal.
+You can update the reference data in two ways:
+* Update the reference data path in your Stream Analytics job from the Azure portal.
* Update the IoT Edge deployment. ## Next steps
synapse-analytics Data Integration https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/partner/data-integration.md
To create your data warehouse solution using the dedicated SQL pool in Azure Syn
| ![oh22 HEDDA.IO](./media/data-integration/heddaiowhitebg-logo.png) |**oh22 HEDDA<span></span>.IO**<br>oh22's HEDDA<span></span>.IO is a knowledge-driven data quality product built for Microsoft Azure. It enables you to build a knowledge base and use it to perform various critical data quality tasks, including correction, enrichment, and standardization of your data. HEDDA<span></span>.IO also allows you to do data cleansing by using cloud-based reference data services provided by reference data providers or developed and provided by you.| [Product page](https://github.com/oh22is/HEDDA.IO)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/oh22.hedda-io) | | ![Precisely](./media/data-integration/precisely-logo.png) |**Precisely**<br>Precisely Connect ETL enables extract transfer and load (ETL) of data from multiple sources to Azure targets. Connect ETL is an easy to configure tool that doesn't require coding or tuning. ETL transformation can be done on the fly. It eliminates the need for costly database staging areas or manual pushes, allowing you to create your own data blends with consistent sustainable performance. Import legacy data from multiple sources including mainframe DB2, VSAM, IMS, Oracle, SQL Server, Teradata, and write them to cloud targets including Azure Databricks, Azure Synapse Analytics, and Azure Data Lake Storage. By using the high performance Connect ETL engine, you can expect optimal performance and consistency.|[Product page](https://www.precisely.com/solution/microsoft-azure)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/syncsort.dmx) | | ![Qlik Data Integration](./media/business-intelligence/qlik_logo.png) |**Qlik Data Integration**<br>Qlik Data Integration provides an automated solution for loading data into an Azure Synapse. It simplifies batch loading and incremental replication of data from many sources: SQL Server, Oracle, DB2, Sybase, MySQL, and more. |[Product page](https://www.qlik.com/us/products/data-integration-products)<br>[Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/qlik.qlik_data_integration_platform) <br> |
-| ![Qubole](./media/data-integration/qubole_logo.png) |**Qubole**<br>Qubole provides a cloud-native platform that enables users to conduct ETL, analytics, and AI/ML workloads. It supports different kinds of open-source engines - Apache Spark, TensorFlow, Presto, Airflow, Hadoop, Hive, and more. It provides easy-to-use end-user tools for data processing from SQL query tools, to notebooks, and dashboards that use powerful open-source engines.|[Product page](https://www.qubole.com/company/partners/partners-microsoft-azure/)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/qubole-inc.qubole-data-service?tab=Overview) |
+| ![Qubole](./media/data-integration/qubole_logo.png) |**Qubole**<br>Qubole provides a cloud-native platform that enables users to conduct ETL, analytics, and AI/ML workloads. It supports different kinds of open-source engines - Apache Spark, TensorFlow, Presto, Airflow, Hadoop, Hive, and more. It provides easy-to-use end-user tools for data processing from SQL query tools, to notebooks, and dashboards that use powerful open-source engines.|[Product page](https://www.qubole.com/company/partners/partners-microsoft-azure/) |
| ![SAS](./media/business-intelligence/sas-logo.jpg) |**SAS® Viya®**<br>SAS® Viya® is an AI, analytic, and data management solution running on a scalable, cloud-native architecture. It enables you to operationalize insights, empowering everyone – from data scientists to business users – to collaborate and realize innovative results faster. Using open source or SAS models, SAS® Viya® can be accessed through APIs or interactive interfaces to transform raw data into actions. |[Product page](https://www.sas.com/microsoft)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/sas-institute-560503.sas-viya-saas?tab=Overview)<br> | | ![Segment](./media/data-integration/segment_logo.png) |**Segment**<br>Segment is a data management and analytics solution that helps you make sense of customer data coming from various sources. It allows you to connect your data to over 200 tools to create better decisions, products, and experiences. Segment will transform and load multiple data sources into your warehouse for you using its built-in data connectors|[Product page](https://segment.com/)<br> | | ![Skyvia](./media/data-integration/skyvia_logo.png) |**Skyvia (data integration)**<br>Skyvia data integration provides a wizard that automates data imports. This wizard allows you to migrate data between different kinds of sources - CRMs, application database, CSV files, and more. |[Product page](https://skyvia.com/)<br> |
synapse-analytics Machine Learning Ai https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/partner/machine-learning-ai.md
This article highlights Microsoft partners with machine learning and artificial
| - | -- | -- | | ![Dataiku](./media/machine-learning-and-ai/dataiku-logo.png) |**Dataiku**<br>Dataiku is the centralized data platform that moves businesses along their data journey from analytics at scale to Enterprise AI, powering self-service analytics while also ensuring the operationalization of machine learning models in production. |[Product page](https://www.dataiku.com/partners/microsoft/)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/dataiku.dataiku-data-science-studio)<br> | | ![MATLAB](./media/machine-learning-and-ai/mathworks-logo.png) |**Matlab**<br>MATLAB® is a programming platform designed for engineers and scientists. It combines a desktop environment tuned for iterative analysis and design processes with a programming language that expresses matrix and array mathematics directly. Millions worldwide use MATLAB for a range of applications, including machine learning, deep learning, signal and image processing, control systems, and computational finance. |[Product page](https://www.mathworks.com/products/database.html)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/mathworks-inc.matlab-byol?tab=Overview)<br> |
-| ![Qubole](./media/data-integration/qubole_logo.png) |**Qubole**<br>Qubole provides a cloud-native platform that enables users to conduct ETL, analytics, and AI/ML workloads. It supports different kinds of open-source engines - Apache Spark, TensorFlow, Presto, Airflow, Hadoop, Hive, and more. It provides easy-to-use end-user tools for data processing from SQL query tools, to notebooks, and dashboards that use powerful open-source engines.|[Product page](https://www.qubole.com/company/partners/partners-microsoft-azure/)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/qubole-inc.qubole-data-service?tab=Overview)
-| ![SAS](./media/business-intelligence/sas-logo.jpg) |**SAS® Viya®**<br>SAS® Viya® is an AI, analytic, and data management solution running on a scalable, cloud-native architecture. It enables you to operationalize insights, empowering everyone – from data scientists to business users – to collaborate and realize innovative results faster. Using open source or SAS models, SAS® Viya® can be accessed through APIs or interactive interfaces to transform raw data into actions. |[Product page](https://www.sas.com/microsoft)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/sas-institute-560503.sas-viya-saas?tab=Overview)<br> |
+| ![Qubole](./media/data-integration/qubole_logo.png) |**Qubole**<br>Qubole provides a cloud-native platform that enables users to conduct ETL, analytics, and AI/ML workloads. It supports different kinds of open-source engines - Apache Spark, TensorFlow, Presto, Airflow, Hadoop, Hive, and more. It provides easy-to-use end-user tools for data processing from SQL query tools, to notebooks, and dashboards that use powerful open-source engines.|[Product page](https://www.qubole.com/company/partners/partners-microsoft-azure/) | ![SAS](./media/business-intelligence/sas-logo.jpg) |**SAS® Viya®**<br>SAS® Viya® is an AI, analytic, and data management solution running on a scalable, cloud-native architecture. It enables you to operationalize insights, empowering everyone – from data scientists to business users – to collaborate and realize innovative results faster. Using open source or SAS models, SAS® Viya® can be accessed through APIs or interactive interfaces to transform raw data into actions. |[Product page](https://www.sas.com/microsoft)<br> [Azure Marketplace](https://azuremarketplace.microsoft.com/marketplace/apps/sas-institute-560503.sas-viya-saas?tab=Overview)<br> |
## Next steps To learn more about other partners, see [Business Intelligence partners](business-intelligence.md), [Data Integration partners](data-integration.md), and [Data Management partners](data-management.md).
synapse-analytics Workspaces Encryption https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/security/workspaces-encryption.md
Previously updated : 03/07/2022 Last updated : 03/24/2022
SQL Transparent Data Encryption (TDE) is available for dedicated SQL Pools in wo
## Next steps
-[Use built-in Azure Policies to implement encryption protection for Synapse workspaces](../policy-reference.md)
-
-[Create an Azure key vault and a key by using Resource Manager template](../../key-vault/keys/quick-create-template.md)
+- [Use built-in Azure Policies to implement encryption protection for Synapse workspaces](../policy-reference.md)
+- [Create an Azure key vault and a key by using Resource Manager template](../../key-vault/keys/quick-create-template.md)
synapse-analytics Apache Spark Azure Machine Learning Tutorial https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/spark/apache-spark-azure-machine-learning-tutorial.md
In Azure Machine Learning, a workspace is a class that accepts your Azure subscr
```python from azureml.core import Workspace
-# Enter your workspace subscription, resource group, name, and region.
+# Enter your subscription id, resource group, and workspace name.
subscription_id = "<enter your subscription ID>" #you should be owner or contributor
resource_group = "<enter your resource group>" #you should be owner or contributor
workspace_name = "<enter your workspace name>" #your workspace name
-workspace_region = "<enter workspace region>" #your region
ws = Workspace(workspace_name = workspace_name, subscription_id = subscription_id, resource_group = resource_group)- ``` ## Convert a DataFrame to an Azure Machine Learning dataset
After you've validated your best model, you can register it to Azure Machine Lea
```python description = 'My automated ML model' model_path='outputs/model.pkl'
-model = best_run.register_model(model_name = 'NYCGreenTaxiModel', model_path = model_path, description = description)
+model = best_run.register_model(model_name = 'NYCYellowTaxiModel', model_path = model_path, description = description)
print(model.name, model.version) ``` ```Output
-NYCGreenTaxiModel 1
+NYCYellowTaxiModel 1
``` ## View results in Azure Machine Learning You can also access the results of the iterations by going to the experiment in your Azure Machine Learning workspace. Here, you can get additional details on the status of your run, attempted models, and other model metrics.
You can also access the results of the iterations by going to the experiment in
## Next steps - [Azure Synapse Analytics](../index.yml)-- [Tutorial: Build a machine learning app with Apache Spark MLlib and Azure Synapse Analytics](./apache-spark-machine-learning-mllib-notebook.md)
+- [Tutorial: Build a machine learning app with Apache Spark MLlib and Azure Synapse Analytics](./apache-spark-machine-learning-mllib-notebook.md)
synapse-analytics Apache Spark Custom Conda Channel https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/spark/apache-spark-custom-conda-channel.md
mkdir -p channel/linux64
<Add all .tar.bz2 from https://repo.anaconda.com/pkgs/main/linux-64/> // Note: Add all dependent .tar.bz2 as well
-cd channel1
+cd channel
mkdir noarch
echo '{}' > noarch/repodata.json
bzip2 -k noarch/repodata.json
dependencies:
Once you've created the sample Conda file, you can create a virtual Conda environment. You can verify this locally by running the following commands:
```
-conda env create ΓÇôfile sample.yml
+conda env create --file sample.yml
source activate env
conda list
```
synapse-analytics Release Notes 10 0 10106 0 https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/sql-data-warehouse/release-notes-10-0-10106-0.md
description: Release notes for dedicated SQL pool (formerly SQL DW) in Azure Syn
Previously updated : 1/13/2022 Last updated : 3/24/2022
For tooling improvements, make sure you have the correct version installed speci
| Service improvements | Details | | | |
-|**Column-Level Encryption (Public Preview)**|Protect sensitive information in your Azure Synapse Analytics by applying symmetric encryption to a column of data using Transact-SQL. Column-level encryption has built-in functions you can use to encrypt data using symmetric keys that are further protected with a certificate, password, symmetric key, or asymmetric key. For more information, please visit [Encrypt a Column of Data](/sql/relational-databases/security/encryption/encrypt-a-column-of-data?view=azure-sqldw-latest&preserve-view=true).|
+|**Column-Level Encryption (Public Preview)**|Protect sensitive information in your Azure Synapse Analytics by applying symmetric encryption to a column of data using Transact-SQL. Column-level encryption has built-in functions you can use to encrypt data using symmetric keys that are further protected with a certificate, password, symmetric key, or asymmetric key. For more information, please visit [Encrypt a Column of Data](/sql/relational-databases/security/encryption/encrypt-a-column-of-data?view=azure-sqldw-latest&preserve-view=true). **This feature is now generally available.** A minimal Transact-SQL sketch follows this table.|
|**Compatibility Level support (GA)**|With this release, users can now set a database's compatibility level to get the Transact-SQL language and query processing behaviors of a specific version of the Synapse SQL engine. For more information, see [sys.database_scoped_configurations](/sql/relational-databases/system-catalog-views/sys-database-scoped-configurations-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) and [Alter Database Scoped Configuration](/sql/t-sql/statements/alter-database-scoped-configuration-transact-sql?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true).| |**Row Level Security**|This release includes an improvement for update and delete operations on rows with RLS enforced on them. With this release, update and delete operations with intrinsic functions like 'is_rolemember' will succeed if the intrinsic does not reference any column in the DML target table. Before this improvement, these operations failed due to limitation in the underlying DML operations.| |**DBCC SHRINKDATABASE (GA)**|You can now shrink the size of the data and log files in the specified database. For more info, see the [documentation](/sql/t-sql/database-console-commands/dbcc-shrinkdatabase-transact-sql?view=azure-sqldw-latest&preserve-view=true).|
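As an illustration of the column-level encryption feature called out in the table above, the built-in functions follow the standard Transact-SQL symmetric key pattern described in the linked article. The object and column names below are assumptions for this sketch, not taken from the release notes, and a database master key is assumed to already exist.

```SQL
-- Illustrative sketch: all names are hypothetical; assumes a database master key exists
-- and that dbo.Customers has an nvarchar SSN_Plain column and a varbinary SSN_Encrypted column.
CREATE CERTIFICATE SsnCert WITH SUBJECT = 'Protects the SSN symmetric key';
CREATE SYMMETRIC KEY SsnKey WITH ALGORITHM = AES_256 ENCRYPTION BY CERTIFICATE SsnCert;

OPEN SYMMETRIC KEY SsnKey DECRYPTION BY CERTIFICATE SsnCert;

-- Encrypt the plaintext column on write ...
UPDATE dbo.Customers
SET SSN_Encrypted = EncryptByKey(Key_GUID('SsnKey'), SSN_Plain);

-- ... and decrypt it on read.
SELECT CONVERT(nvarchar(32), DecryptByKey(SSN_Encrypted)) AS SSN
FROM dbo.Customers;

CLOSE SYMMETRIC KEY SsnKey;
```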
synapse-analytics Overview Features https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/synapse-analytics/sql/overview-features.md
Previously updated : 02/15/2022 Last updated : 03/24/2022
Synapse SQL pools enable you to use built-in security features to secure your da
| **Permissions - [Database-level](/sql/relational-databases/security/authentication-access/database-level-roles?view=azure-sqldw-latest&preserve-view=true)** | Yes | Yes, you can grant, deny, or revoke permissions on the database objects. | | **Permissions - Schema-level** | Yes, including ability to GRANT, DENY, and REVOKE permissions to users/logins on the schema | Yes, you can specify schema-level permissions including ability to GRANT, DENY, and REVOKE permissions to users/logins on the schema. | | **Permissions - Object-level** | Yes, including ability to GRANT, DENY, and REVOKE permissions to users | Yes, you can GRANT, DENY, and REVOKE permissions to users/logins on the system objects that are supported. |
-| **Permissions - [Column-level security](../sql-data-warehouse/column-level-security.md?toc=%2fazure%2fsynapse-analytics%2ftoc.json)** | Yes | Yes, column-level security is supported in serverless SQL pools. |
+| **Permissions - [Column-level security](../sql-data-warehouse/column-level-security.md?toc=%2fazure%2fsynapse-analytics%2ftoc.json)** | Yes | Yes, column-level security is supported in serverless SQL pools. Column level encryption is also generally available, see [Encrypt a column of data](/sql/relational-databases/security/encryption/encrypt-a-column-of-data?view=azure-sqldw-latest&preserve-view=true).|
| **Row-level security** | [Yes](/sql/relational-databases/security/row-level-security?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true) | No, there is no built-in support for the row-level security. Use custom views as a [workaround](https://techcommunity.microsoft.com/t5/azure-synapse-analytics-blog/how-to-implement-row-level-security-in-serverless-sql-pools/ba-p/2354759). | | **Data masking** | [Yes](../guidance/security-white-paper-access-control.md#dynamic-data-masking) | No, built-in data masking is not supported in the serverless SQL pools. Use wrapper SQL views that explicitly mask some columns as a workaround. | | **Built-in/system security &amp; identity functions** | Some Transact-SQL security functions and operators: `CURRENT_USER`, `HAS_DBACCESS`, `IS_MEMBER`, `IS_ROLEMEMBER`, `SESSION_USER`, `SUSER_NAME`, `SUSER_SNAME`, `SYSTEM_USER`, `USER`, `USER_NAME`, `EXECUTE AS`, `OPEN/CLOSE MASTER KEY` | Some Transact-SQL security functions and operators are supported: `CURRENT_USER`, `HAS_DBACCESS`, `HAS_PERMS_BY_NAME`, `IS_MEMBER`, `IS_ROLEMEMBER`, `IS_SRVROLEMEMBER`, `SESSION_USER`, `SESSION_CONTEXT`, `SUSER_NAME`, `SUSER_SNAME`, `SYSTEM_USER`, `USER`, `USER_NAME`, `EXECUTE AS`, and `REVERT`. Security functions cannot be used to query external data (store the result in variable that can be used in the query). |
time-series-insights How To Tsi Gen2 Migration https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/time-series-insights/how-to-tsi-gen2-migration.md
The Power BI query copied from TSI UX Explorer looks like the following:
``` {"storeType":"ColdStore","isSearchSpanRelative":false,"clientDataType":"RDX_20200713_Q","environmentFqdn":"6988946f-2b5c-4f84-9921-530501fbab45.env.timeseries.azure.com", "queries":[{"aggregateSeries":{"searchSpan":{"from":"2019-10-31T23:59:39.590Z","to":"2019-11-01T05:22:18.926Z"},"timeSeriesId":["Arctic Ocean",null],"interval":"PT1M", "inlineVariables":{"EventCount":{"kind":"aggregate","aggregation":{"tsx":"count()"}}},"projectedVariables":["EventCount"]}}]} ```-- To convert it to TSQ, build a JSON from the above payload. The AggregateSeries API documentation also has examples to understand it better. [Query - Execute - REST API (Azure Time Series Insights) | Microsoft Docs](/azure/rest/api/time-series-insights/dataaccessgen2/query/execute#queryaggregateseriespage1)
+- To convert it to TSQ, build a JSON from the above payload. The AggregateSeries API documentation also has examples to understand it better. [Query - Execute - REST API (Azure Time Series Insights) | Microsoft Docs](/rest/api/time-series-insights/dataaccessgen2/query/execute#queryaggregateseriespage1)
- The converted TSQ looks like the example shown below. It's the JSON payload inside "queries". ``` {
virtual-desktop Azure Monitor Glossary https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-desktop/azure-monitor-glossary.md
Previously updated : 03/29/2021 Last updated : 03/25/2022
The most urgent items that you need to take care of right away. If you don't add
## Time to connect
-Time to connect is the time between when a user clicks a resource to start their session and when their desktop has loaded and is ready to use. For remote app use cases this is the time to launch the application. For new sessions this time encompasses two primary stages: connection, Azure service timing related to the time to route the user to a session host, and logon, the length of time taken to perform personalization and other tasks to establish a session on the session host. When monitoring time to connect, keep in mind the following things:
+Time to connect is the time between when a user opens a resource to start their session and when their desktop has loaded and is ready to use. For example, for RemoteApps, this is the time it takes to launch the application.
-* Time to connect is measured with the following checkpoints from AVD's service data:
+Time to connect has two stages:
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Begins: [WVDConnection](https://docs.microsoft.com/azure/azure-monitor/reference/tables/wvdconnections) state = started
+- Connection, which is how long it takes for the Azure service to route the user to a session host.
+- "Logon," which is how long it takes for the service to perform tasks related to signing in the user and establishing the session on the session host.
-&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Ends: [WVDCheckpoints](https://docs.microsoft.com/azure/azure-monitor/reference/tables/wvdcheckpoints) Name = ShellReady (desktops); Name = first app launch for RemoteApp (RdpShellAppExecuted)
+When monitoring time to connect, keep in mind the following things:
-As an example, the time for a desktop experience to launch would be measured up to the launch of Windows Explorer (explorer.exe).
+- Time to connect is measured with the following checkpoints from Azure Virtual Desktop service data:
-- Establishing new sessions tends to take longer than reestablishing connections to existing sessions due to Logon stages required in new session setup.
+ - Begins: [WVDConnection](/azure/azure-monitor/reference/tables/wvdconnections) state = started
-- The time it takes the user to provide credentials is subtracted from their time to connect to help avoid signaling long connection times where a user may have had a long delay to enter credentials or use alternative authentication methods.
+ - Ends: [WVDCheckpoints](/azure/azure-monitor/reference/tables/wvdcheckpoints) Name = ShellReady (desktops); Name = first app launch for RemoteApp (RdpShellAppExecuted)
-When troubleshooting a high time to connect, you can breakdown total connection time into a few components to help identify actionable ways to reduce logon time.
+ For example, the time for a desktop experience to launch would be measured based on how long it takes to launch Windows Explorer (explorer.exe).
+
+- Establishing new sessions usually takes longer than reestablishing connections to existing sessions due to differences in the "logon" process for new and established connections.
+
+- The time it takes for the user to provide credentials is subtracted from their time to connect to account for situations where a user either takes a while to enter credentials or uses alternative authentication methods to sign in.
+
+When troubleshooting a high time to connect, Azure Monitor will break down total connection time data into four components to help you identify how to reduce sign-in time.
>[!NOTE]
->Only primary connection steps are surfaced in the stages, and these components can run in parallel, meaning they will not add up to equal a total time to connect.
+>The components in this section only show the primary connection stages. These components can run in parallel, which means they won't add up to equal the total time to connect. The total time to connect is a measurement that Azure Monitor determines in a separate process.
-Connection stages:
+The following flowchart shows the four stages of the sign-in process:
- ```mermaid
-flowchart LR
- id0{{User Initiates Connection}}
- id1[User Route]
- id2[Stack Connect]
- id3[Logon]
- id4[Shell Start]
- id5{{Session is Ready}}
- id0 --> id1
- id1 --> id2
- id2 --> id3
- id2 --> id4
- id3 --> id5
- id4 --> id5
-```
-- User route: Time from when the user clicks the icon to launch a session to when the service identifies a host to connect to. Network load, service load, or unique network traffic routing could lead to high routing times. Troubleshooting may require more detailed network path investigation.+
+The flowchart shows the following four components:
+
+- User route: the time it takes from when the user selects the Azure Virtual Desktop icon to launch a session to when the service identifies a host to connect to. High network load, high service load, or unique network traffic routing can lead to high routing times. To troubleshoot user route issues, look at your network paths.
-- Stack connected: Time from when the service has resolved a target session host for the user to when the connection is established from the session host to the user's remote client. Like user routing, the network load, server load, or unique network traffic routing could lead to high connection times. An additional consideration for network routing would be ensuring proxy configurations on both the client and session host side are appropriately configured and routing to the service is optimal.
+- Stack connected: the time it takes from when the service resolves a target session host for the user to when the service establishes a connection between the session host and the user's remote client. Like user routing, the network load, server load, or unique network traffic routing can affect connection time. For this component, you'll also need to pay attention to your network routing. To reduce connection time, make sure you've appropriately configured all proxy configurations on both the client and session hosts, and that routing to the service is optimal.
-- Logon: Time from when the connection to a host is established to when the shell starts to load. Logon time includes several processes that can contribute to high logon time; you can use Logon stages in Insights to identify peaks and see Logon stages documentation below to learn more. More details on the logon stages are provided in the next section.
+- Logon: the time it takes from when a connection to a host is established to when the shell starts to load. Logon time includes several processes that can contribute to high connection times. You can view data for the "logon" stage in Insights to see if there are unexpected peaks in average times.
-- Shell start to shell ready: Time from when the shell starts to load to when it is fully loaded and ready for use. The most likely sources of delays in this phase include session host overload (high CPU, memory, or disk activity) or configuration issues.
+ The "logon" process is divided into four stages:
-Logon stages:
 - Profiles: the time it takes to load a user's profile for new sessions. How long loading takes depends on user profile size or the user profile solutions you're using (such as User Experience Virtualization). If you're using a solution that depends on network-stored profiles, excess latency can also lead to longer profile loading times.
-- Profiles: The time it takes to load a user's profile for new sessions. This time will largely relate to profile sizes or user profile solutions in use (e.g., User Experience Virtualization). For solutions making use of a network stored profile excess latency may also lead to longer profile loading times.
 - Group Policy Objects (GPOs): the time it takes to apply group policies to new sessions. A spike in this area of the data is a sign that you have too many group policies, the policies take too long to apply, or the session host is experiencing resource issues. One thing you can do to optimize processing times is make sure the domain controller is as close to session hosts as possible.
-- Group Policy (GPOs): Time it takes to apply group policies to new sessions. A spike in this time bucket indicates that you have too many group policies, the policies take too long to apply, or the session host is experiencing resource issues. As a further note, the Domain Controller (DC) needs to be close to session hosts for optimal GPO processing times.
+ - Shell Start: the time it takes to launch the shell (usually explorer.exe).
-- Shell Start: The time it takes to launch the shell (usually explorer.exe).
+ - FSLogix (Frxsvc): the time it takes to launch FSLogix in new sessions. A long launch time may indicate issues with the shares used to host the FSLogix user profiles. To troubleshoot these issues, make sure the shares are collocated with the session hosts and appropriately scaled for the average number of users signing in to the hosts. Another area you should look at is profile size. Large profile sizes can slow down launch times.
-- FSLogix (Frxsvc): Time it takes to launch FSLogix in new sessions. If this time is slow, it may indicate issues with the shares used to host the FSLogix user profiles; ensure the shares are collocated with the session hosts and appropriately scaled for the user volume logging into hosts. Additionally, larger profile sizes could contribute to slowness.
+- Shell start to shell ready: the time from when the shell starts to load to when it's fully loaded and ready for use. Delays in this phase can be caused by session host overload (high CPU, memory, or disk activity) or configuration issues.
## User report
virtual-desktop Create File Share https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-desktop/create-file-share.md
To set up a storage account:
4. Enter the following information into the **Create storage account** page: - Create a new resource group.
- - Enter a unique name for your storage account.
   - Enter a unique name for your storage account. Storage account names currently have a limit of 15 characters.
- For **Location**, we recommend you choose the same location as the Azure Virtual Desktop host pool. - For **Performance**, select **Standard**. (Depending on your IOPS requirements. For more information, see [Storage options for FSLogix profile containers in Azure Virtual Desktop](store-fslogix-profile.md).) - For **Account type**, select **StorageV2** or **FileStorage** (only available if Performance tier is Premium).
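If you prefer scripting, the portal settings above map roughly to the following Azure CLI sketch. This is only an illustration: the account name, resource group, and region are placeholders, and you should pick the SKU and kind that match your IOPS requirements.

```azurecli
# Hypothetical example: create a premium file storage account for FSLogix profiles.
# "fslogixprof01", "myResourceGroup", and "eastus" are placeholder values.
az storage account create \
  --name fslogixprof01 \
  --resource-group myResourceGroup \
  --location eastus \
  --sku Premium_LRS \
  --kind FileStorage
```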
virtual-machines Automatic Vm Guest Patching https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/automatic-vm-guest-patching.md
To modify an existing VM, use [az vm update](/cli/azure/vm#az-vm-update)
az vm update --resource-group myResourceGroup --name myVM --set osProfile.windowsConfiguration.enableAutomaticUpdates=true osProfile.windowsConfiguration.patchSettings.patchMode=AutomaticByPlatform ```
+### Azure portal
+When creating a VM using the Azure portal, patch orchestration modes can be set under the **Management** tab for both Linux and Windows.
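For comparison with the portal flow, the following is a hedged Azure CLI sketch of setting the patch orchestration mode at creation time. The resource names, image, and credentials are placeholders, and the exact parameters you need can vary by OS and image.

```azurecli
# Hypothetical example: create a Windows VM with platform-orchestrated patching.
# Resource group, VM name, image, and credentials are placeholder values.
az vm create \
  --resource-group myResourceGroup \
  --name myVM \
  --image Win2019Datacenter \
  --admin-username azureuser \
  --admin-password '<your-password>' \
  --enable-agent true \
  --enable-auto-update true \
  --patch-mode AutomaticByPlatform
```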
++ ## Enablement and assessment > [!NOTE]
virtual-machines Boot Diagnostics https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/boot-diagnostics.md
Everything after API version 2020-06-01 supports managed boot diagnostics. For m
- Managed storage accounts are supported in Resource Manager API version "2020-06-01" and later. - Azure Serial Console is currently incompatible with a managed storage account for boot diagnostics. Learn more about [Azure Serial Console](/troubleshoot/azure/virtual-machines/serial-console-overview). - Portal only supports the use of boot diagnostics with a managed storage account for single instance VMs.
+- Users cannot configure a retention period for managed boot diagnostics. The logs are overwritten when the total size exceeds 1 GB.
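As a hedged illustration of the managed option described above (subject to the limitations in this list), boot diagnostics with a managed storage account can typically be enabled by omitting an explicit storage URI. The VM and resource group names below are placeholders.

```azurecli
# Hypothetical example: enable boot diagnostics on an existing VM (placeholder names).
# Omitting --storage uses a managed storage account.
az vm boot-diagnostics enable --resource-group myResourceGroup --name myVM
```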
## Next steps
virtual-machines Disks Change Performance https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/disks-change-performance.md
description: Learn about performance tiers for managed disks.
Previously updated : 06/29/2021 Last updated : 03/24/2022
The performance of your Azure managed disk is set when you create your disk, in
Changing the performance tier allows you to prepare for and meet higher demand without using your disk's bursting capability. It can be more cost-effective to change your performance tier rather than rely on bursting, depending on how long the additional performance is necessary. This is ideal for events that temporarily require a consistently higher level of performance, like holiday shopping, performance testing, or running a training environment. To handle these events, you can use a higher performance tier for as long as you need it. You can then return to the original tier when you no longer need the additional performance.
+## Restrictions
++ ## How it works When you first deploy or provision a disk, the baseline performance tier for that disk is set based on the provisioned disk size. You can use a performance tier higher than the original baseline to meet higher demand. When you no longer need that performance level, you can return to the initial baseline performance tier.
+### Billing impact
+ Your billing changes as your performance tier changes. For example, if you provision a P10 disk (128 GiB), your baseline performance tier is set as P10 (500 IOPS and 100 MBps). You'll be billed at the P10 rate. You can upgrade the tier to match the performance of P50 (7,500 IOPS and 250 MBps) without increasing the disk size. During the time of the upgrade, you'll be billed at the P50 rate. When you no longer need the higher performance, you can return to the P10 tier. The disk will once again be billed at the P10 rate.
+For billing information, see [Managed disk pricing](https://azure.microsoft.com/pricing/details/managed-disks/).
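As a hedged sketch of the example above (a P10 disk temporarily moved to the P50 tier and back), the tier property can be updated with the Azure CLI roughly as follows. The disk and resource group names are placeholders, and tier changes may require the disk to be detached or the VM deallocated.

```azurecli
# Hypothetical example: upgrade a 128-GiB (P10) disk to the P50 performance tier (placeholder names).
az disk update --resource-group myResourceGroup --name myDataDisk --set tier=P50

# Return to the baseline P10 tier when the extra performance is no longer needed.
az disk update --resource-group myResourceGroup --name myDataDisk --set tier=P10
```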
+
+## What tiers can be changed
+
+The following table depicts which tiers each baseline performance tier can upgrade to.
+ | Disk size | Baseline performance tier | Can be upgraded to | |-|--|-| | 4 GiB | P1 | P2, P3, P4, P6, P10, P15, P20, P30, P40, P50 |
Your billing changes as your performance tier changes. For example, if you provi
| 16 TiB | P70 | P80 | | 32 TiB | P80 | None |
-For billing information, see [Managed disk pricing](https://azure.microsoft.com/pricing/details/managed-disks/).
-
-## Restrictions
-- ## Next steps To learn how to change your performance tier, see [portal](disks-performance-tiers-portal.md) or [PowerShell/CLI](disks-performance-tiers.md) articles.
virtual-machines Disks Performance Tiers Portal https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/disks-performance-tiers-portal.md
description: Learn how to change performance tiers for new and existing managed
Previously updated : 09/02/2021 Last updated : 03/24/2022
virtual-machines Disks Performance Tiers https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/disks-performance-tiers.md
description: Learn how to change performance tiers for existing managed disks us
Previously updated : 02/25/2022 Last updated : 03/24/2022
virtual-machines Azure Hybrid Benefit Byos Linux https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/linux/azure-hybrid-benefit-byos-linux.md
>Azure Hybrid Benefit for BYOS VMs is planned for Preview from **30 March 2022**. You can [sign up for the preview here.](https://aka.ms/ahb-linux-form) You will receive an email from Microsoft once your subscriptions are enabled for Preview.
-Azure Hybrid Benefit for BYOS VMs is a licensing benefit that helps you to get software updates and integrated support for Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES) virtual machines (VMs) directly from Azure infrastructure. This benefit is available to RHEL and SLES custom on-prem image VMs (VMs generated from o- prem images), and to RHEL and SLES Marketplace bring-your-own-subscription (BYOS) VMs.
+Azure Hybrid Benefit for BYOS VMs is a licensing benefit that helps you to get software updates and integrated support for Red Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES) virtual machines (VMs) directly from Azure infrastructure. This benefit is available to RHEL and SLES custom on-prem image VMs (VMs generated from on-prem images), and to RHEL and SLES Marketplace bring-your-own-subscription (BYOS) VMs.
## Benefit description Before AHB BYOS, RHEL and SLES customers who migrated their on-prem machines to Azure by creating images of on-prem systems and migrating them as VMs on Azure did not have the flexibility to get software updates directly from Azure, the way Marketplace PAYG VMs do. Hence, you still needed to buy cloud access licenses from the Enterprise Linux distributors to get security support as well as software updates. With Azure Hybrid Benefit for BYOS VMs, you will be able to get software updates and support for on-prem custom image VMs as well as RHEL and SLES BYOS VMs, similar to PAYG VMs, by paying the same software fees as charged to PAYG VMs. In addition, these conversions can happen without any redeployment, so you can avoid any downtime risk.
To start using the benefit for SUSE:
1. Wait for 5 minutes for the extension to read the license type value and install the repositories.
-1. You should now be connected to Azure SLES Update Infrastructure and the relevant repositories will be installed in your machine.
+1. You should now be connected to the SUSE Public Cloud Update Infrastructure on Azure and the relevant repositories will be installed on your machine.
1. In case the extension is not running by itself, you can run it on demand as well.
you can use the `az vm update` command to update existing license type on runnin
## Enable and disable the benefit for SLES To enable the benefit, install the `AHBForSLES` extension. After successfully installing the extension,
-you can use the `az vm update` command to update existing license type on running VMs. For SLES VMs, run the command and set `--license-type` parameter to one of the following: `SLES_STANDARD`,`SLES_SAP` or `SLES_HPC`.
+you can use the `az vm update` command to update the existing license type on running VMs. For SLES VMs, run the command and set the `--license-type` parameter to one of the following: `SLES_STANDARD`, `SLES_SAP`, or `SLES_HPC`.
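For the `az vm update` command mentioned above, a minimal sketch might look like the following. The resource group and VM names are placeholders; pick the license type that matches your SLES edition.

```azurecli
# Hypothetical example: set the license type on a running SLES VM (placeholder names).
az vm update --resource-group myResourceGroup --name myVM --license-type SLES_STANDARD
```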
### CLI example to enable the benefit for SLES 1. Install the Azure Hybrid Benefit extension on running VM using the portal or via Azure CLI using the command below:
you can use the `az vm update` command to update existing license type on runnin
``` 1. Wait for 5 minutes for the extension to read the license type value and install the repositories.
-1. You should now be connected to Azure SLES Update Infrastructure and the relevant repositories will be installed in your machine. You can check the same by performing the command below on your VM which outputs installed repository packages on your VM:
+1. You should now be connected to the SUSE Public Cloud Update Infrastructure on Azure and the relevant repositories will be installed on your machine. You can verify this by running the command below, which lists the SUSE repositories installed on your VM:
```bash zypper repos ```
To check the status of Azure Hybrid Benefit for BYOS VM status
1. You can view the Azure Hybrid Benefit status of a VM by using the Azure CLI or by using Azure Instance Metadata Service. You can use the below command for this purpose. Look for a `licenseType` field in the response. If the `licenseType` field exists and the value is one of the below, your VM has the benefit enabled:
- `RHEL_BASE`, `RHEL_EUS`, `RHEL_BASESAPAPPS`, `RHEL_SAPHA`, `RHEL_BASESAPAPPS`, `RHEL_BASESAPHA`, `SLES_STANDARD`, `SLES_SAP`, `SLES_`
+ `RHEL_BASE`, `RHEL_EUS`, `RHEL_BASESAPAPPS`, `RHEL_SAPHA`, `RHEL_BASESAPAPPS`, `RHEL_BASESAPHA`, `SLES_STANDARD`, `SLES_SAP`, `SLES_HPC`.
```azurecli az vm get-instance-view -g MyResourceGroup -n MyVm
virtual-machines Storage Performance https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/linux/storage-performance.md
Scenarios that securely erase data to protect the customer include:
- The VM becomes unhealthy and has to service heal to another node due to a hardware issue. - A small number of the planned maintenance servicing operations that requires the VM to be reallocated to another host for servicing.
-To learn more about options for backing up data in local storage, see [Backup and disaster recovery for Azure IaaS disks](../backup-and-disaster-recovery-for-azure-iaas-disks.md).
- ## Frequently asked questions * **How do I start deploying Lsv2-series VMs?**
virtual-machines N Series Migration https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/n-series-migration.md
Last updated 08/15/2020
As more powerful GPUs become available in the marketplace and in Microsoft Azure datacenters, we recommend re-assessing the performance of your workloads and considering migrating to newer GPUs.
-For the same reason, as well as to maintain a high-quality and reliable service offering, Azure periodically retires the hardware that powers older VM sizes. The first group of GPU products to be retired in Azure are the original NC, NC v2 and ND-series VMs, powered by NVIDIA Tesla K80, P100, and P40 datacenter GPU accelerators respectively. These products will be retired on August 31st 2022, and the oldest VMs in this series launched in 2016.
+For the same reason, as well as to maintain a high-quality and reliable service offering, Azure periodically retires the hardware that powers older VM sizes. The first group of GPU products to be retired in Azure are the original NC, NC v2 and ND-series VMs, powered by NVIDIA Tesla K80, P100, and P40 datacenter GPU accelerators respectively. These products will be retired on August 31st 2023, and the oldest VMs in this series launched in 2016.
Since then, GPUs have made incredible strides alongside the entire deep learning and HPC industry, typically exceeding a doubling in performance between generations. Since the launch of NVIDIA K80, P40, and P100 GPUs, Azure has shipped multiple newer generations and categories of VM products geared at GPU-accelerated compute and AI, based around NVIDIAΓÇÖs T4, V100, and A100 GPUs, and differentiated by optional features such as InfiniBand-based interconnect fabrics. These are all options we encourage customers to explore as migration paths.
virtual-machines Nva10v5 Series https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/nva10v5-series.md
The preview is currently available in US South Central and West Europe regions.[Si
| Size | vCPU | Memory: GiB | Temp storage (SSD) GiB | GPU partition | GPU memory: GiB | Max data disks | Max NICs / Expected network bandwidth (MBps) | | | | | | | | | | | Standard_NV6ads_A10_v5 |6 |55 |180 | 1/6 | 4 | 4 | 2 / 5000 |
-| Standard_NV12ads_A10_v5 |12 |110 |360 | 1/3 | 6 | 4 | 2 / 10000 |
+| Standard_NV12ads_A10_v5 |12 |110 |360 | 1/3 | 8 | 4 | 2 / 10000 |
| Standard_NV18ads_A10_v5 |18 |220 |720 | 1/2 | 12 | 8 | 4 / 20000 | | Standard_NV36ads_A10_v5 |36 |440 |720 | 1 | 24 | 16 | 4 / 40000 | | Standard_NV36adms_A10_v5 |36 |880 |720 | 1 | 24 | 32 | 8 / 80000 | | Standard_NV72ads_A10_v5 |72 |880 |1400 | 2 | 48 | 32 | 8 / 80000 | <sup>1</sup> NVadsA10v5-series VMs feature AMD Simultaneous multithreading Technology
+<sup>2</sup> The actual GPU VRAM reported in the operating system will be a little less due to Error Correcting Code (ECC) support.
virtual-machines Storage Performance https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/windows/storage-performance.md
Scenarios that securely erase data to protect the customer include:
- The VM becomes unhealthy and has to service heal to another node due to a hardware issue. - A small number of the planned maintenance servicing operations that requires the VM to be reallocated to another host for servicing.
-To learn more about options for backing up data in local storage, see [Backup and disaster recovery for Azure IaaS disks](../backup-and-disaster-recovery-for-azure-iaas-disks.md).
- ## Frequently asked questions * **How do I start deploying Lsv2-series VMs?**
virtual-machines Winrm https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/windows/winrm.md
Title: Set up WinRM access for an Azure VM description: Set up WinRM access for use with an Azure virtual machine created in the Resource Manager deployment model. - - Previously updated : 06/16/2016 Last updated : 3/25/2022
Before uploading the certificate to the Key Vault created in step 1, it needs to
$fileName = "<Path to the .pfx file>" $fileContentBytes = Get-Content $fileName -Encoding Byte $fileContentEncoded = [System.Convert]::ToBase64String($fileContentBytes)- [System.Collections.HashTable]$TableForJSON = @{ "data" = $fileContentEncoded; "dataType" = "pfx"; "password" = "<password>"; } [System.String]$jsonObject = $TableForJSON | ConvertTo-Json
-$jsonEncoded = [System.Convert]::ToBase64String($jsonObject)
-
+$encoding = [System.Text.Encoding]::UTF8
+$jsonEncoded = [System.Convert]::ToBase64String($encoding.GetBytes($jsonObject))
$secret = ConvertTo-SecureString -String $jsonEncoded -AsPlainText -Force Set-AzKeyVaultSecret -VaultName "<vault name>" -Name "<secret name>" -SecretValue $secret ```
virtual-machines Cal S4h https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/cal-s4h.md
You will need to authenticate with your S-User or P-User. You can create a P-Use
| Solution | Link | | -- | : |
+| **SAP S/4HANA 2020 FPS01** March 22 2022 | [Create Instance](https://cal.sap.com/registration?sguid=4bad009a-cb02-4992-a8b6-28c331a79c66&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+|This solution comes as a standard S/4HANA system installation including a remote desktop for easy frontend access. It contains a pre-configured and activated SAP S/4HANA Fiori UI in client 100, with prerequisite components activated as per SAP note 3009827 Rapid Activation for SAP Fiori in SAP S/4HANA 2020 FPS01. See More Information Link. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/4bad009a-cb02-4992-a8b6-28c331a79c66) |
+| **SAP Financial Services Data Platform 1.15** March 16 2022 | [Create Instance](https://cal.sap.com/registration?sguid=310f0bd9-fcad-4ecb-bfea-c61cdc67152b&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+|SAP Financial Services Data Management aims to support customers in the building of a data platform for the banking and insurance industries on SAP HANA. It helps the customer to reduce redundancies by managing enterprise data with a "single source of truth" approach through a harmonized integrated data model. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/310f0bd9-fcad-4ecb-bfea-c61cdc67152b) |
| **SAP S/4HANA 2020 FPS02 for Productive Deployments** December 06 2021 | [Create Instance](https://cal.sap.com/registration?sguid=6562b978-0df0-4b2d-a114-22ba359006ca&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |This solution comes as a standard S/4HANA system installation including High Availability capabilities to ensure higher system uptime for productive usage. The system parameters can be customized during initial provisioning according to the requirements for the target system. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/6562b978-0df0-4b2d-a114-22ba359006ca) | | **SAP S/4HANA 2020 FPS02, Fully-Activated Appliance** July 27 2021 | [Create Instance](https://cal.sap.com/registration?sguid=d48af08b-e2c6-4409-82f8-e42d5610e918&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |This appliance contains SAP S/4HANA 2020 (FPS02) with pre-activated SAP Best Practices for SAP S/4HANA core functions, and further scenarios for Service, Master Data Governance (MDG), Transportation Mgmt. (TM), Portfolio Mgmt. (PPM), Human Capital Management (HCM), Analytics, Migration Cockpit, and more. User access happens via SAP Fiori, SAP GUI, SAP HANA Studio, Windows remote desktop, or the backend operating system for full administrative access. | [Details]( https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/d48af08b-e2c6-4409-82f8-e42d5610e918) |
+| **SAP S/4HANA 2021, Fully-Activated Appliance** December 08 2021 | [Create Instance](https://cal.sap.com/registration?sguid=b8a9077c-f0f7-47bd-977c-70aa6a6a2aa7&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+|This appliance contains SAP S/4HANA 2021 (SP00) with pre-activated SAP Best Practices for SAP S/4HANA core functions, and further scenarios for Service, Master Data Governance (MDG), Transportation Mgmt. (TM), Portfolio Mgmt. (PPM), Human Capital Management (HCM), Analytics, Migration Cockpit, and more. User access happens via SAP Fiori, SAP GUI, SAP HANA Studio, Windows remote desktop, or the backend operating system for full administrative access. | [Details]( https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/b8a9077c-f0f7-47bd-977c-70aa6a6a2aa7) |
| **SAP S/4HANA 2020 FPS01, Fully-Activated Appliance** April 20 2021 | [Create Instance](https://cal.sap.com/registration?sguid=a0b63a18-0fd3-4d88-bbb9-4f02c13dc343&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |This appliance contains SAP S/4HANA 2020 (FPS01) with pre-activated SAP Best Practices for SAP S/4HANA core functions, and further scenarios for Service, Master Data Governance (MDG), Transportation Mgmt. (TM), Portfolio Mgmt. (PPM), Human Capital Management (HCM), Analytics, Migration Cockpit, and more. User access happens via SAP Fiori, SAP GUI, SAP HANA Studio, Windows remote desktop, or the backend operating system for full administrative access. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/a0b63a18-0fd3-4d88-bbb9-4f02c13dc343) |
-| **SAP S/4HANA 2020 FPS02** Jun 10, 2021 | [Create Instance](https://cal.sap.com/registration?sguid=c7cff775-cbf7-4cd1-a907-6eeca95a0946&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
-| This solution comes as a standard S/4HANA system installation including a remote desktop for easy frontend access. It contains a pre-configured and activated SAP S/4HANA Fiori UI in client 100, with prerequisite components activated as per SAP note 3045635 Rapid Activation for SAP Fiori in SAP S/4HANA 2020 FPS02. See More Information Link. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/c7cff775-cbf7-4cd1-a907-6eeca95a0946) |
+| **SAP S/4HANA 2020 FPS02** February 23 2022 | [Create Instance](https://cal.sap.com/registration?sguid=c3b133c5-fa87-4572-8cc8-e9dac2e43e6d&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+|This solution comes as a standard S/4HANA system installation including a remote desktop for easy frontend access. It contains a pre-configured and activated SAP S/4HANA Fiori UI in client 100, with prerequisite components activated as per SAP note 3045635 Rapid Activation for SAP Fiori in SAP S/4HANA 2020 FPS02. See More Information Link. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/c3b133c5-fa87-4572-8cc8-e9dac2e43e6d) |
| **IDES EHP8 FOR SAP ERP 6.0 on SAP ASE, June 2021** June 10 2021 | [Create Instance](https://cal.sap.com/registration?sguid=ed55a454-0b10-47c5-8644-475ecb8988a0&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |IDES systems are copies of the SAP-internal demo systems and used as playground for customizing and testing. This IDES system specifically can be used as source system in the data migration scenarios of the SAP S/4HANA Fully-Activated Appliance (2020 FPS01 and higher). Besides that, it contains standard business scenarios based on predefined master and transactional data. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/ed55a454-0b10-47c5-8644-475ecb8988a0) |
-| **SAP BW/4HANA 2.0 SP07 including BW/4HANA Content 2.0 SP06** February 24 2021 | [Create Instance](https://cal.sap.com/registration?sguid=0f2f20f4-d012-4f76-81af-6ff15063db66&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
-|This solution offers you an insight of SAP BW/4HANA. SAP BW/4HANA is the next generation Data Warehouse optimized for HANA. Beside the basic BW/4HANA options the solution offers a bunch of HANA optimized BW/4HANA Content and the next step of Hybrid Scenarios with SAP Data Warehouse Cloud. As the system is pre-configured you can start directly implementing your scenarios. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/0f2f20f4-d012-4f76-81af-6ff15063db66) |
-| **SAP Business One 10.0 PL02, version for SAP HANA** August 04 2020 | [Create Instance](https://cal.sap.com/registration?sguid=371edc8c-56c6-4d21-acb4-2d734722c712&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
-|Trusted by over 70,000 small and midsize businesses in 170+ countries, SAP Business One is a flexible, affordable, and scalable ERP solution with the power of SAP HANA. The solution is pre-configured using a 31-day trial license and has a demo database of your choice pre-installed. See the getting started guide to learn about the scope of the solution and how to easily add new demo databases. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/371edc8c-56c6-4d21-acb4-2d734722c712) |
+| **SAP BW/4HANA 2021 including BW/4HANA Content 2.0 SP08** March 08 2022 | [Create Instance](https://cal.sap.com/registration?sguid=26167db3-6ab2-40fc-a8d9-af5b4014c10c&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+|This solution offers you an insight of SAP BW/4HANA. SAP BW/4HANA is the next generation Data Warehouse optimized for SAP HANA. Beside the basic BW/4HANA options the solution offers a bunch of SAP HANA optimized BW/4HANA Content and the next step of Hybrid Scenarios with SAP Data Warehouse Cloud. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/26167db3-6ab2-40fc-a8d9-af5b4014c10c) |
+| **SAP Business One 10.0 PL02, version for SAP HANA** August 24 2020 | [Create Instance](https://cal.sap.com/registration?sguid=371edc8c-56c6-4d21-acb4-2d734722c712&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+|Trusted by over 70,000 small and midsize businesses in 170+ countries, SAP Business One is a flexible, affordable, and scalable ERP solution with the power of SAP HANA. The solution is pre-configured using a 31-day trial license and has a demo database of your choice pre-installed. See the getting started guide to learn about the scope of the solution and how to easily add new demo databases. To secure your system against the CVE-2021-44228 vulnerability, apply SAP Support Note 3131789. For more information, see the Getting Started Guide of this solution (check the "Security Aspects" chapter). | [Details]( https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/371edc8c-56c6-4d21-acb4-2d734722c712) |
| **Information Detector for SAP Data Custodian v2106** August 30 2021 | [Create Instance](https://cal.sap.com/registration?sguid=db44680c-8a2a-405d-8963-838db38fa7dd&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |The information detector for SAP Data Custodian can be used to automate data labeling of cloud resources. Information detectors search through your infrastructure resources and determine whether they contain certain types of information. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/db44680c-8a2a-405d-8963-838db38fa7dd) | | **SAP Yard Logistics 2009 for SAP S/4HANA** Jul 28, 2021 | [Create Instance](https://cal.sap.com/registration?sguid=9cdf4f13-73a5-4743-a213-82e0d1a68742&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
You will need to authenticate with your S-User or P-User. You can create a P-Use
|Solution 3 after performing a technical system conversion from SAP ERP to SAP S/4HANA before additional configuration. It has been tested and prepared as converted from SAP EHP6 for SAP ERP 6.0 SPS13 to SAP S/4HANA 2020 FPS00. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/4336a3fb-2fc9-4a93-9500-c65101ffc9d7) | | **4: SAP S/4HANA target system including additional config (openSAP)** October 17 2021 | [Create Instance](https://cal.sap.com/registration?sguid=f48f2b77-389f-488b-be2b-1c14a86b2e69&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |Solution 4 after performing a technical system conversion from SAP ERP to SAP S/4HANA including additional configuration. It has been tested and prepared as converted from SAP EHP6 for SAP ERP 6.0 SPS13 to SAP S/4HANA 2020 FPS00. | [Details]( https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/f48f2b77-389f-488b-be2b-1c14a86b2e69) |
-| ** SAP Solution Manager 7.2 SP13 & Focused Solutions SP08 (Demo System) with SAP S/4HANA** November 16 2021 | [Create Instance](https://cal.sap.com/registration?sguid=769336fe-cb15-44dc-926c-e3f851adab32&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
+| **SAP Solution Manager 7.2 SP13 & Focused Solutions SP08 (Demo System) with SAP S/4HANA** November 16 2021 | [Create Instance](https://cal.sap.com/registration?sguid=769336fe-cb15-44dc-926c-e3f851adab32&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) |
|SAP Solution Manager 7.2 supports the "business of IT" with four key value chains: Portfolio to Project (P2P) to drive the portfolio of projects and balance business initiatives and their business value against IT capacity, skills and timelines. Requirement to Deploy (R2D) to build what the business needs. Request to Fulfill (R2F) to catalog, request and fulfill services. Detect to Correct (D2C) to anticipate and resolve production problems. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/769336fe-cb15-44dc-926c-e3f851adab32) | | **Enterprise Management Layer for SAP S/4HANA 2020 FPS02** November 15 2021 | [Create Instance](https://cal.sap.com/registration?sguid=0f85835e-b3d5-4b75-b65e-4d89ed0da409&provider=208b780d-282b-40ca-9590-5dd5ad1e52e8) | |The enterprise management layer for SAP S/4HANA 2020 offers a ready-to-run, pre-configured, localized core template based on pre-activated SAP Best Practices on-premise country versions covering 43 countries. The CAL solution can be used to get familiar with this offering. | [Details](https://cal.sap.com/catalog?provider=208b780d-282b-40ca-9590-5dd5ad1e52e8#/solutions/0f85835e-b3d5-4b75-b65e-4d89ed0da409) |
virtual-machines High Availability Guide Suse Multi Sid https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/high-availability-guide-suse-multi-sid.md
vm-windows Previously updated : 01/24/2022 Last updated : 03/25/2022
To achieve high availability, SAP NetWeaver requires highly available NFS shares
The NFS server, SAP NetWeaver ASCS, SAP NetWeaver SCS, SAP NetWeaver ERS, and the SAP HANA database use virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. We recommend using [Standard load balancer](../../../load-balancer/quickstart-load-balancer-standard-public-portal.md).
-The following list shows the configuration of the (A)SCS and ERS load balancer for this multi-SID cluster example with three SAP systems. You will need separate frontend IP, health probes, and load-balancing rules for each ASCS and ERS instance for each of the SIDs. Assign all VMs, that are part of the ASCS/ASCS cluster to one backend pool.
-
-### (A)SCS
-
-* Frontend configuration
- * IP address for NW1: 10.3.1.14
- * IP address for NW2: 10.3.1.16
- * IP address for NW3: 10.3.1.13
-* Probe Ports
- * Port 620<strong>&lt;nr&gt;</strong>, therefore for NW1, NW2, and NW3 probe ports 620**00**, 620**10** and 620**20**
-* Load-balancing rules -
-* create one for each instance, that is, NW1/ASCS, NW2/ASCS and NW3/ASCS.
- * If using Standard Load Balancer, select **HA ports**
- * If using Basic Load Balancer, create Load balancing rules for the following ports
- * 32<strong>&lt;nr&gt;</strong> TCP
- * 36<strong>&lt;nr&gt;</strong> TCP
- * 39<strong>&lt;nr&gt;</strong> TCP
- * 81<strong>&lt;nr&gt;</strong> TCP
- * 5<strong>&lt;nr&gt;</strong>13 TCP
- * 5<strong>&lt;nr&gt;</strong>14 TCP
- * 5<strong>&lt;nr&gt;</strong>16 TCP
-
-### ERS
-
-* Frontend configuration
- * IP address for NW1 10.3.1.15
- * IP address for NW2 10.3.1.17
- * IP address for NW3 10.3.1.19
-* Probe Port
- * Port 621<strong>&lt;nr&gt;</strong>, therefore for NW1, NW2, and N# probe ports 621**02**, 621**12** and 621**22**
-* Load-balancing rules - create one for each instance, that is, NW1/ERS, NW2/ERS and NW3/ERS.
- * If using Standard Load Balancer, select **HA ports**
- * If using Basic Load Balancer, create Load balancing rules for the following ports
- * 32<strong>&lt;nr&gt;</strong> TCP
- * 33<strong>&lt;nr&gt;</strong> TCP
- * 5<strong>&lt;nr&gt;</strong>13 TCP
- * 5<strong>&lt;nr&gt;</strong>14 TCP
- * 5<strong>&lt;nr&gt;</strong>16 TCP
-
-* Backend configuration
- * Connected to primary network interfaces of all virtual machines that should be part of the (A)SCS/ERS cluster
+The presented configuration for this multi-SID cluster example with three SAP systems shows a load balancer with:
+
+* Frontend IP addresses for ASCS: 10.3.1.14 (NW1), 10.3.1.16 (NW2) and 10.3.1.13 (NW3)
+* Frontend IP addresses for ERS: 10.3.1.15 (NW1), 10.3.1.17 (NW2) and 10.3.1.19 (NW3)
+* Probe port 62000 for NW1 ASCS, 62010 for NW2 ASCS and 62020 for NW3 ASCS
+* Probe port 62102 for NW1 ERS, 62112 for NW2 ERS and 62122 for NW3 ERS
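As a hedged illustration of the probe configuration listed above, the NW1 probes could be created with the Azure CLI along these lines. The load balancer and resource group names are placeholders; repeat with the NW2 and NW3 ports.

```azurecli
# Hypothetical example: health probes for the NW1 ASCS (62000) and ERS (62102) instances.
# Load balancer and resource group names are placeholders.
az network lb probe create --resource-group myResourceGroup --lb-name sap-multisid-lb \
  --name nw1-ascs-hp --protocol Tcp --port 62000 --interval 5 --threshold 2
az network lb probe create --resource-group myResourceGroup --lb-name sap-multisid-lb \
  --name nw1-ers-hp --protocol Tcp --port 62102 --interval 5 --threshold 2
```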
> [!IMPORTANT] > Floating IP is not supported on a NIC secondary IP configuration in load-balancing scenarios. For details see [Azure Load balancer Limitations](../../../load-balancer/load-balancer-multivip-overview.md#limitations). If you need additional IP address for the VM, deploy a second NIC.
virtual-machines High Availability Guide Suse Netapp Files https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/high-availability-guide-suse-netapp-files.md
vm-windows Previously updated : 01/24/2022 Last updated : 03/25/2022
Now it is possible to achieve SAP Netweaver HA by using shared storage, deployed
![SAP NetWeaver High Availability overview](./media/high-availability-guide-suse-anf/high-availability-guide-suse-anf.png)
-SAP NetWeaver ASCS, SAP NetWeaver SCS, SAP NetWeaver ERS, and the SAP HANA database use virtual hostname and virtual IP addresses. On Azure, a [load balancer](../../../load-balancer/load-balancer-overview.md) is required to use a virtual IP address. We recommend using [Standard load balancer](../../../load-balancer/quickstart-load-balancer-standard-public-portal.md). The following list shows the configuration of the (A)SCS and ERS load balancer.
-
-### (A)SCS
-
-* Frontend configuration
- * IP address 10.1.1.20
-* Probe Port
- * Port 620<strong>&lt;nr&gt;</strong>
-* Load-balancing rules
- * If using Standard Load Balancer, select **HA ports**
- * If using Basic Load Balancer, create Load balancing rules for the following ports
- * 32<strong>&lt;nr&gt;</strong> TCP
- * 36<strong>&lt;nr&gt;</strong> TCP
- * 39<strong>&lt;nr&gt;</strong> TCP
- * 81<strong>&lt;nr&gt;</strong> TCP
- * 5<strong>&lt;nr&gt;</strong>13 TCP
- * 5<strong>&lt;nr&gt;</strong>14 TCP
- * 5<strong>&lt;nr&gt;</strong>16 TCP
-
-### ERS
-
-* Frontend configuration
- * IP address 10.1.1.21
-* Probe Port
- * Port 621<strong>&lt;nr&gt;</strong>
-* Load-balancing rules
- * If using Standard Load Balancer, select **HA ports**
- * If using Basic Load Balancer, create Load balancing rules for the following ports
- * 32<strong>&lt;nr&gt;</strong> TCP
- * 33<strong>&lt;nr&gt;</strong> TCP
- * 5<strong>&lt;nr&gt;</strong>13 TCP
- * 5<strong>&lt;nr&gt;</strong>14 TCP
- * 5<strong>&lt;nr&gt;</strong>16 TCP
-
-* Backend configuration
- * Connected to primary network interfaces of all virtual machines that should be part of the (A)SCS/ERS cluster
+SAP NetWeaver ASCS, SAP NetWeaver SCS, SAP NetWeaver ERS, and the SAP HANA database use virtual hostname and virtual IP addresses. On Azure, a [load balancer](../../../load-balancer/load-balancer-overview.md) is required to use a virtual IP address. We recommend using [Standard load balancer](../../../load-balancer/quickstart-load-balancer-standard-public-portal.md). The presented configuration shows a load balancer with:
+* Frontend IP address 10.1.1.20 for ASCS
+* Frontend IP address 10.1.1.21 for ERS
+* Probe port 62000 for ASCS
+* Probe port 62101 for ERS
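As a hedged sketch of the frontend configuration above, the ASCS and ERS frontend IP addresses could be added to an existing internal load balancer roughly as follows. The load balancer, virtual network, and subnet names are placeholders.

```azurecli
# Hypothetical example: add the ASCS (10.1.1.20) and ERS (10.1.1.21) frontend IPs.
# Load balancer, VNet, and subnet names are placeholders.
az network lb frontend-ip create --resource-group myResourceGroup --lb-name qas-lb \
  --name qas-ascs-frontend --private-ip-address 10.1.1.20 \
  --vnet-name myVNet --subnet mySubnet
az network lb frontend-ip create --resource-group myResourceGroup --lb-name qas-lb \
  --name qas-ers-frontend --private-ip-address 10.1.1.21 \
  --vnet-name myVNet --subnet mySubnet
```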
## Setting up the Azure NetApp Files infrastructure
First you need to create the Azure NetApp Files volumes. Deploy the VMs. Afterwa
1. **Make sure to enable Floating IP** 1. Click OK * Repeat the steps above to create load balancing rules for ERS (for example **lb.QAS.ERS**)
-1. Alternatively, if your scenario requires basic load balancer (internal), follow these steps:
+1. Alternatively, ***only if*** your scenario requires a basic load balancer (internal), follow these configuration steps instead to create a basic load balancer:
1. Create the frontend IP addresses 1. IP address 10.1.1.20 for the ASCS 1. Open the load balancer, select frontend IP pool, and click Add
virtual-machines High Availability Guide Suse Nfs Azure Files https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/high-availability-guide-suse-nfs-azure-files.md
After you deploy the VMs for your SAP system, create a load balancer. Then, use
1. **Make sure to enable Floating IP** 1. Click OK * Repeat the steps above to create load balancing rules for ERS (for example **lb.NW1.ERS**)
-1. Alternatively, if your scenario requires basic load balancer (internal), follow these steps:
+1. Alternatively, ***only if*** your scenario requires a basic load balancer (internal), follow these steps instead to create a basic load balancer:
1. Create the frontend IP addresses 1. IP address 10.90.90.10 for the ASCS 1. Open the load balancer, select frontend IP pool, and click Add
virtual-machines High Availability Guide Suse https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/high-availability-guide-suse.md
vm-windows Previously updated : 01/24/2022 Last updated : 03/25/2022
To achieve high availability, SAP NetWeaver requires an NFS server. The NFS serv
![SAP NetWeaver High Availability overview](./media/high-availability-guide-suse/ha-suse.png)
-The NFS server, SAP NetWeaver ASCS, SAP NetWeaver SCS, SAP NetWeaver ERS, and the SAP HANA database use virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. We recommend using [Standard load balancer](../../../load-balancer/quickstart-load-balancer-standard-public-portal.md). The following list shows the configuration of the (A)SCS and ERS load balancer.
-
-### (A)SCS
-
-* Frontend configuration
- * IP address 10.0.0.7
-* Probe Port
- * Port 620<strong>&lt;nr&gt;</strong>
-* Load balancing rules
- * If using Standard Load Balancer, select **HA ports**
- * If using Basic Load Balancer, create Load balancing rules for the following ports
- * 32<strong>&lt;nr&gt;</strong> TCP
- * 36<strong>&lt;nr&gt;</strong> TCP
- * 39<strong>&lt;nr&gt;</strong> TCP
- * 81<strong>&lt;nr&gt;</strong> TCP
- * 5<strong>&lt;nr&gt;</strong>13 TCP
- * 5<strong>&lt;nr&gt;</strong>14 TCP
- * 5<strong>&lt;nr&gt;</strong>16 TCP
-
-* Backend configuration
- * Connected to primary network interfaces of all virtual machines that should be part of the (A)SCS/ERS cluster
-
-### ERS
-
-* Frontend configuration
- * IP address 10.0.0.8
-* Probe Port
- * Port 621<strong>&lt;nr&gt;</strong>
-* Load-balancing rules
- * If using Standard Load Balancer, select **HA ports**
- * If using Basic Load Balancer, create Load balancing rules for the following ports
- * 32<strong>&lt;nr&gt;</strong> TCP
- * 33<strong>&lt;nr&gt;</strong> TCP
- * 5<strong>&lt;nr&gt;</strong>13 TCP
- * 5<strong>&lt;nr&gt;</strong>14 TCP
- * 5<strong>&lt;nr&gt;</strong>16 TCP
-
-* Backend configuration
- * Connected to primary network interfaces of all virtual machines that should be part of the (A)SCS/ERS cluster
+The NFS server, SAP NetWeaver ASCS, SAP NetWeaver SCS, SAP NetWeaver ERS, and the SAP HANA database use virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. We recommend using [Standard load balancer](../../../load-balancer/quickstart-load-balancer-standard-public-portal.md). The presented configuration shows a load balancer with:
+* Frontend IP address 10.0.0.7 for ASCS
+* Frontend IP address 10.0.0.8 for ERS
+* Probe port 62000 for ASCS
+* Probe port 62101 for ERS
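As a hedged sketch (load balancer, frontend, backend pool, and probe names are placeholders), an HA-ports load-balancing rule with Floating IP for the ASCS frontend could look like the following when using Standard Load Balancer.

```azurecli
# Hypothetical example: HA-ports rule for the ASCS frontend with Floating IP enabled.
# All resource names are placeholders.
az network lb rule create --resource-group myResourceGroup --lb-name nw1-lb \
  --name nw1-lb-ascs --protocol All --frontend-port 0 --backend-port 0 \
  --frontend-ip-name nw1-ascs-frontend --backend-pool-name nw1-backend-pool \
  --probe-name nw1-ascs-hp --enable-floating-ip true --idle-timeout 30
```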
## Setting up a highly available NFS server
You first need to create the virtual machines for this NFS cluster. Afterwards,
1. **Make sure to enable Floating IP** 1. Click OK * Repeat the steps above to create load balancing rules for ERS (for example **nw1-lb-ers**)
-1. Alternatively, if your scenario requires basic load balancer (internal), follow these steps:
+1. Alternatively, ***only if*** your scenario requires a basic load balancer (internal), follow these configuration steps instead to create a basic load balancer:
1. Create the frontend IP addresses 1. IP address 10.0.0.7 for the ASCS 1. Open the load balancer, select frontend IP pool, and click Add
virtual-machines Sap Hana High Availability Netapp Files Red Hat https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-red-hat.md
vm-linux Previously updated : 10/08/2021 Last updated : 03/24/2022
Mounted on node2 (**hanadb2**)
> [!NOTE] > File systems /hana/shared, /hana/data and /hana/log are not shared between the two nodes. Each cluster node has its own, separate file systems.
-The SAP HANA System Replication configuration uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The following list shows the configuration of the load balancer:
+The SAP HANA System Replication configuration uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The presented configuration shows a load balancer with:
-- Front-end configuration: IP address 10.32.0.10 for hn1-db-- Back-end configuration: Connected to primary network interfaces of all virtual machines that should be part of HANA System Replication-- Probe Port: Port 62503-- Load-balancing rules: 30313 TCP, 30315 TCP, 30317 TCP, 30340 TCP, 30341 TCP, 30342 TCP (if using Basic Azure Load balancer)
+- Front-end IP address: 10.32.0.10 for hn1-db
+- Probe Port: 62503
## Set up the Azure NetApp File infrastructure
First you need to create the Azure NetApp Files volumes. Then do the following s
> [!NOTE] > When VMs without public IP addresses are placed in the backend pool of internal (no public IP address) Standard Azure load balancer, there will be no outbound internet connectivity, unless additional configuration is performed to allow routing to public end points. For details on how to achieve outbound connectivity see [Public endpoint connectivity for Virtual Machines using Azure Standard Load Balancer in SAP high-availability scenarios](./high-availability-guide-standard-load-balancer-outbound-connections.md).
-8. If using standard load balancer, follow these configuration steps:
+8. To set up standard load balancer, follow these configuration steps:
1. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP pool**, and select **Add**. 1. Enter the name of the new front-end IP pool (for example, **hana-frontend**).
First you need to create the Azure NetApp Files volumes. Then do the following s
1. Select **OK**.
-9. Alternatively, if your scenario dictates using basic load balancer, follow these configuration steps:
+9. Alternatively, ***only if*** your scenario dictates using basic load balancer, follow these configuration steps instead:
1. Configure the load balancer. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP pool**, and select **Add**. 1. Enter the name of the new front-end IP pool (for example, **hana-frontend**).
virtual-machines Sap Hana High Availability Netapp Files Suse https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/sap-hana-high-availability-netapp-files-suse.md
vm-linux Previously updated : 11/23/2021 Last updated : 03/24/2022
Mounted on node2 (**hanadb2**)
> [!NOTE] > File systems /hana/shared, /hana/data and /hana/log are not shared between the two nodes. Each cluster node has its own, separate file systems.
-SAP high availability HANA System Replication configuration uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The following list shows the configuration of the load balancer:
+SAP high availability HANA System Replication configuration uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The presented configuration shows a load balancer with:
-- Front-end configuration: IP address 10.3.0.50 for hn1-db-- Back-end configuration: Connected to primary network interfaces of all virtual machines that should be part of HANA System Replication-- Probe Port: Port 62503-- Load-balancing rules: 30313 TCP, 30315 TCP, 30317 TCP, 30340 TCP, 30341 TCP, 30342 TCP (if using Basic Azure Load balancer)
+- Front-end configuration IP address: 10.3.0.50 for hn1-db
+- Probe Port: 62503
## Set up the Azure NetApp File infrastructure
First you need to create the Azure NetApp Files volumes. Then do the following s
> [!NOTE] > When VMs without public IP addresses are placed in the backend pool of internal (no public IP address) Standard Azure load balancer, there will be no outbound internet connectivity, unless additional configuration is performed to allow routing to public end points. For details on how to achieve outbound connectivity see [Public endpoint connectivity for Virtual Machines using Azure Standard Load Balancer in SAP high-availability scenarios](./high-availability-guide-standard-load-balancer-outbound-connections.md).
-8. If using standard load balancer, follow these configuration steps:
+8. To set up standard load balancer, follow these configuration steps:
1. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP configuration**, and select **Add**. 1. Enter the name of the new front-end IP (for example, **hana-frontend**).
First you need to create the Azure NetApp Files volumes. Then do the following s
1. Select **OK**.
-9. Alternatively, if your scenario dictates using basic load balancer, follow these configuration steps:
+9. Alternatively, ***only if*** your scenario dictates using basic load balancer, follow these configuration steps instead:
1. Configure the load balancer. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP Configuration**, and select **Add**. 1. Enter the name of the new front-end IP (for example, **hana-frontend**).
virtual-machines Sap Hana High Availability Rhel https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/sap-hana-high-availability-rhel.md
vm-linux Previously updated : 11/02/2021 Last updated : 03/24/2022
To achieve high availability, SAP HANA is installed on two virtual machines. The
![SAP HANA high availability overview](./media/sap-hana-high-availability-rhel/ha-hana.png)
-SAP HANA System Replication setup uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The following list shows the configuration of the load balancer:
+SAP HANA System Replication setup uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The presented configuration shows a load balancer with:
-* Front-end configuration: IP address 10.0.0.13 for hn1-db
-* Back-end configuration: Connected to primary network interfaces of all virtual machines that should be part of HANA System Replication
-* Probe Port: Port 62503
-* Load-balancing rules: 30313 TCP, 30315 TCP, 30317 TCP, 30340 TCP, 30341 TCP, 30342 TCP
+* Front-end IP address: 10.0.0.13 for hn1-db
+* Probe Port: 62503
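
The back-end configuration that was listed here, connecting the primary network interfaces of both HANA VMs, can also be built with the Azure CLI. A minimal sketch with placeholder resource group, load balancer, and NIC names:

```azurecli
# Back-end pool on the existing load balancer
az network lb address-pool create \
  --resource-group MyResourceGroup \
  --lb-name hana-lb \
  --name hana-backend

# Add the primary IP configuration of each HANA VM's primary NIC to the pool
az network nic ip-config address-pool add \
  --resource-group MyResourceGroup \
  --nic-name hanadb1-nic \
  --ip-config-name ipconfig1 \
  --lb-name hana-lb \
  --address-pool hana-backend

az network nic ip-config address-pool add \
  --resource-group MyResourceGroup \
  --nic-name hanadb2-nic \
  --ip-config-name ipconfig1 \
  --lb-name hana-lb \
  --address-pool hana-backend
```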
## Deploy for Linux
To deploy the template, follow these steps:
> [!Note] > When VMs without public IP addresses are placed in the backend pool of internal (no public IP address) Standard Azure load balancer, there will be no outbound internet connectivity, unless additional configuration is performed to allow routing to public end points. For details on how to achieve outbound connectivity see [Public endpoint connectivity for Virtual Machines using Azure Standard Load Balancer in SAP high-availability scenarios](./high-availability-guide-standard-load-balancer-outbound-connections.md).
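
One common way to provide that outbound connectivity is to attach a NAT gateway to the subnet hosting the VMs (see the linked article for the supported options). A hedged Azure CLI sketch with placeholder names:

```azurecli
# Public IP and NAT gateway for outbound internet access from the HANA subnet
az network public-ip create \
  --resource-group MyResourceGroup \
  --name hana-natgw-pip \
  --sku Standard \
  --allocation-method Static

az network nat gateway create \
  --resource-group MyResourceGroup \
  --name hana-natgw \
  --public-ip-addresses hana-natgw-pip

# Associate the NAT gateway with the subnet that contains the HANA VMs
az network vnet subnet update \
  --resource-group MyResourceGroup \
  --vnet-name MyVNet \
  --name hana-subnet \
  --nat-gateway hana-natgw
```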
-1. If using standard load balancer, follow these configuration steps:
+1. To set up standard load balancer, follow these configuration steps:
1. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP pool**, and select **Add**.
To deploy the template, follow these steps:
1. Select **OK**.
-1. Alternatively, if your scenario dictates using basic load balancer, follow these configuration steps:
+1. Alternatively, ***only if*** your scenario dictates using basic load balancer, follow these configuration steps instead:
1. Configure the load balancer. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP pool**, and select **Add**.
virtual-machines Sap Hana High Availability https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-machines/workloads/sap/sap-hana-high-availability.md
vm-linux Previously updated : 02/11/2022 Last updated : 03/24/2022
To achieve high availability, SAP HANA is installed on two virtual machines. The
![SAP HANA high availability overview](./media/sap-hana-high-availability/ha-suse-hana.png)
-SAP HANA System Replication setup uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The following list shows the configuration of the load balancer:
+SAP HANA System Replication setup uses a dedicated virtual hostname and virtual IP addresses. On Azure, a load balancer is required to use a virtual IP address. The presented configuration shows a load balancer with:
-* Front-end configuration: IP address 10.0.0.13 for hn1-db
-* Back-end configuration: Connected to primary network interfaces of all virtual machines that should be part of HANA System Replication
-* Probe Port: Port 62503
-* Load-balancing rules: 30313 TCP, 30315 TCP, 30317 TCP
+* Front-end IP address: 10.0.0.13 for hn1-db
+* Probe Port: 62503
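
On a Standard internal load balancer, the individual port rules that were listed here are typically replaced by a single HA-ports rule with floating IP enabled. A minimal Azure CLI sketch, assuming the front-end, back-end pool, and probe already exist (all names are placeholders):

```azurecli
# Single HA-ports rule (Standard internal load balancer only), floating IP enabled
az network lb rule create \
  --resource-group MyResourceGroup \
  --lb-name hana-lb \
  --name hana-lbrule \
  --protocol All \
  --frontend-port 0 \
  --backend-port 0 \
  --frontend-ip-name hana-frontend \
  --backend-pool-name hana-backend \
  --probe-name hana-hp \
  --floating-ip true \
  --idle-timeout 30
```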
## Deploy for Linux
To deploy the template, follow these steps:
### Manual deployment > [!IMPORTANT]
-> Make sure that the OS you select is SAP certified for SAP HANA on the specific VM types you are using. The list of SAP HANA certified VM types and OS releases for those can be looked up in [SAP HANA Certified IaaS Platforms](https://www.sap.com/dmc/exp/2014-09-02-hana-hardware/enEN/#/solutions?filters=v:deCertified;ve:24;iaas;v:125;v:105;v:99;v:120). Make sure to click into the details of the VM type listed to get the complete list of SAP HANA supported OS releases for the specific VM type
->
+> Make sure that the OS you select is SAP certified for SAP HANA on the specific VM types you are using. The list of SAP HANA certified VM types and OS releases for those can be looked up in [SAP HANA Certified IaaS Platforms](https://www.sap.com/dmc/exp/2014-09-02-hana-hardware/enEN/#/solutions?filters=v:deCertified;ve:24;iaas;v:125;v:105;v:99;v:120). Make sure to click into the details of the VM type listed to get the complete list of SAP HANA supported OS releases for the specific VM type
1. Create a resource group. 1. Create a virtual network. 1. Create an availability set. - Set the max update domain.
-1. Create a load balancer (internal). We recommend [standard load balancer](../../../load-balancer/load-balancer-overview.md).
- - Select the virtual network created in step 2.
+1. Create a load balancer (internal). We recommend [standard load balancer](../../../load-balancer/load-balancer-overview.md). Select the virtual network created in step 2.
1. Create virtual machine 1. - Use a SLES4SAP image in the Azure gallery that is supported for SAP HANA on the VM type you selected. - Select the availability set created in step 3.
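
The first few manual steps, creating the resource group, virtual network, availability set, and internal Standard load balancer, can also be scripted. A minimal Azure CLI sketch; the names, region, and address ranges are illustrative only:

```azurecli
az group create --name MyResourceGroup --location westeurope

az network vnet create \
  --resource-group MyResourceGroup \
  --name hana-vnet \
  --address-prefix 10.0.0.0/16 \
  --subnet-name hana-subnet \
  --subnet-prefix 10.0.0.0/24

# Availability set with the maximum update domain count
az vm availability-set create \
  --resource-group MyResourceGroup \
  --name hana-as \
  --platform-update-domain-count 20 \
  --platform-fault-domain-count 2

# Internal Standard load balancer with the virtual IP used by HANA System Replication
az network lb create \
  --resource-group MyResourceGroup \
  --name hana-lb \
  --sku Standard \
  --vnet-name hana-vnet \
  --subnet hana-subnet \
  --frontend-ip-name hana-frontend \
  --backend-pool-name hana-backend \
  --private-ip-address 10.0.0.13
```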
To deploy the template, follow these steps:
- Select the availability set created in step 3. 1. Add data disks.
-> [!IMPORTANT]
-> Floating IP is not supported on a NIC secondary IP configuration in load-balancing scenarios. For details see [Azure Load balancer Limitations](../../../load-balancer/load-balancer-multivip-overview.md#limitations). If you need additional IP address for the VM, deploy a second NIC.
+ > [!IMPORTANT]
+ > Floating IP is not supported on a NIC secondary IP configuration in load-balancing scenarios. For details see [Azure Load balancer Limitations](../../../load-balancer/load-balancer-multivip-overview.md#limitations). If you need additional IP address for the VM, deploy a second NIC.
-> [!Note]
-> When VMs without public IP addresses are placed in the backend pool of internal (no public IP address) Standard Azure load balancer, there will be no outbound internet connectivity, unless additional configuration is performed to allow routing to public end points. For details on how to achieve outbound connectivity see [Public endpoint connectivity for Virtual Machines using Azure Standard Load Balancer in SAP high-availability scenarios](./high-availability-guide-standard-load-balancer-outbound-connections.md).
+ > [!Note]
+ > When VMs without public IP addresses are placed in the backend pool of internal (no public IP address) Standard Azure load balancer, there will be no outbound internet connectivity, unless additional configuration is performed to allow routing to public end points. For details on how to achieve outbound connectivity see [Public endpoint connectivity for Virtual Machines using Azure Standard Load Balancer in SAP high-availability scenarios](./high-availability-guide-standard-load-balancer-outbound-connections.md).
-1. If using standard load balancer, follow these configuration steps:
+1. To set up standard load balancer, follow these configuration steps:
1. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP pool**, and select **Add**.
To deploy the template, follow these steps:
1. Make sure to **enable Floating IP**. 1. Select **OK**.
-1. Alternatively, if your scenario dictates using basic load balancer, follow these configuration steps:
+1. Alternatively, ***only if*** your scenario dictates using basic load balancer, follow these configuration steps instead:
1. First, create a front-end IP pool: 1. Open the load balancer, select **frontend IP pool**, and select **Add**.
virtual-network Virtual Networks Overview https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-network/virtual-networks-overview.md
Virtual networks and subnets span all availability zones in a region. You don't
## Pricing
-There is no charge for using Azure VNet, it is free of cost. Standard charges are applicable for resources, such as Virtual Machines (VMs) and other products. To learn more, see [VNet pricing](https://azure.microsoft.com/pricing/details/virtual-network/) and the Azure [pricing calculator](https://azure.microsoft.com/pricing/calculator/).
+There is no charge for using Azure VNet; it is free of cost. Standard charges are applicable for resources, such as Virtual Machines (VMs) and other products. To learn more, see [VNet pricing](https://azure.microsoft.com/pricing/details/virtual-network/) and the Azure [pricing calculator](https://azure.microsoft.com/pricing/calculator/).
## Next steps
+ - Learn about [Azure Virtual Network concepts and best practices](concepts-and-best-practices.md).
+ - To get started using a virtual network, create one, deploy a few VMs to it, and communicate between the VMs. To learn how, see the [Create a virtual network](quick-create-portal.md) quickstart.
virtual-wan Global Hub Profile https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-wan/global-hub-profile.md
Previously updated : 05/26/2021 Last updated : 03/24/2022 # Download a global or hub-based profile for User VPN clients
-Azure Virtual WAN offers two types of connectivity for remote users: global and hub-based. Use the following sections to learn about profile types and how to download them.
+Azure Virtual WAN offers two types of connectivity for remote users: global and hub-based. Use the following sections to learn about profile types and how to download them.
> [!IMPORTANT] > RADIUS authentication supports only the hub-based profile. ## Global profile
-The global profile associated with a User VPN configuration points to a load balancer that includes all active User VPN hubs that are using that User VPN configuration. A user connected to the global profile is directed to the hub that's closest to the user's geographic location. This type of connectivity is useful when users travel to different locations frequently.
+The global profile associated with a User VPN configuration points to a load balancer that includes all active User VPN hubs that are using that User VPN configuration. A user connected to the global profile is directed to the hub that's closest to the user's geographic location. This type of connectivity is useful when users travel to different locations frequently.
-For example, you can associate a VPN configuration with two Virtual WAN hubs, one in West US and one in Southeast Asia. If a user connects to the global profile associated with the User VPN configuration, they'll connect to the closest Virtual WAN hub based on their location.
+For example, you can associate a VPN configuration with two Virtual WAN hubs, one in West US and one in Southeast Asia. If a user connects to the global profile associated with the User VPN configuration, they'll connect to the closest Virtual WAN hub based on their location.
To download the global profile: 1. Go to the virtual WAN.
-2. Select **User VPN configurations**.
-3. Select the configuration for which you want to download the profile.
-4. Select **Download virtual WAN user VPN profile**.
+1. Select **User VPN configurations**.
+1. Select the configuration for which you want to download the profile.
+1. Select **Download virtual WAN user VPN profile**.
-![Screenshot that shows selections for downloading a global profile.](./media/global-hub-profile/global1.png)
+ :::image type="content" source="./media/global-hub-profile/global.png" alt-text="Screenshot that shows selections for downloading a global profile." border="false" lightbox="./media/global-hub-profile/global.png":::
### Include or exclude a hub from a global profile
-By default, every hub that uses a specific User VPN configuration is included in the corresponding global VPN profile. You can choose to exclude a hub from the global VPN profile. If you do, a user won't be load balanced to connect to that hub's gateway if they're using the global VPN profile.
+By default, every hub that uses a specific User VPN configuration is included in the corresponding global VPN profile. You can choose to exclude a hub from the global VPN profile. If you do, a user won't be load balanced to connect to that hub's gateway if they're using the global VPN profile.
To check whether or not the hub is included in the global VPN profile: 1. Go to the hub. 1. On the left panel, go to **User VPN (Point to site)** under **Connectivity**.
-1. See **Gateway attachment state** to determine if this hub is included in the global VPN profile. If the state is **attached**, the hub is included. If the state is **detached**, the hub is not included.
+1. See **Gateway attachment state** to determine if this hub is included in the global VPN profile. If the state is **attached**, the hub is included. If the state is **detached**, the hub isn't included.
:::image type="content" source="./media/global-hub-profile/attachment-state.png" alt-text="Screenshot that shows the attachment state of a gateway." lightbox="./media/global-hub-profile/attachment-state.png":::
-To include or exclude a specific hub from the global VPN profile:
+To include or exclude a specific hub from the global VPN profile:
1. Select **Include/Exclude Gateway from Global Profile**.
- :::image type="content" source="./media/global-hub-profile/include-exclude-1.png" alt-text="Screenshot that shows the button for including or excluding a hub from a profile." lightbox="./media/global-hub-profile/include-exclude-1.png":::
+ :::image type="content" source="./media/global-hub-profile/select-include-exclude.png" alt-text="Screenshot that shows the button for including or excluding a hub from a profile." lightbox="./media/global-hub-profile/select-include-exclude.png":::
1. Make one of the following choices:
- - Select **Exclude** if you want to remove this hub's gateway from the Virtual WAN global User VPN profile. Users who are using the hub-level User VPN profile will still be able to connect to this gateway. Users who are using the WAN-level profile will not be able to connect to this gateway.
+ - Select **Exclude** if you want to remove this hub's gateway from the Virtual WAN global User VPN profile. Users who are using the hub-level User VPN profile will still be able to connect to this gateway. Users who are using the WAN-level profile won't be able to connect to this gateway.
- - Select **Include** if you want to include this hub's gateway in the Virtual WAN global User VPN profile. Users who are using this WAN-level profile will be able to connect to this gateway.
+ - Select **Include** if you want to include this hub's gateway in the Virtual WAN global User VPN profile. Users who are using this WAN-level profile will be able to connect to this gateway.
- :::image type="content" source="./media/global-hub-profile/include-exclude.png" alt-text="Screenshot that shows the Exclude and Include buttons." lightbox="./media/global-hub-profile/include-exclude.png":::
+ :::image type="content" source="./media/global-hub-profile/include-exclude.png" alt-text="Screenshot that shows the Exclude and Include buttons." lightbox="./media/global-hub-profile/include-exclude.png":::
## Hub-based profile The profile points to a single hub. The user can connect to only the particular hub by using this profile. To download the hub-based profile: 1. Go to the virtual WAN.
-2. On the **Overview** page, select the hub.
+1. On the **Overview** page, select the hub.
- ![Screenshot that shows selecting a hub.](./media/global-hub-profile/hub1.png)
-
-3. Select **User VPN (Point to site)**.
-4. Select **Download virtual Hub User VPN profile**.
-
- :::image type="content" source="./media/global-hub-profile/hub2.png" alt-text="Screenshot that shows how to download a hub profile."lightbox="./media/global-hub-profile/hub2.png":::
+ :::image type="content" source="./media/global-hub-profile/hub-overview.png" alt-text="Screenshot that shows selecting a hub." lightbox="./media/global-hub-profile/hub-overview.png":::
-5. Select **EAPTLS** as the authentication type.
-6. Select **Generate and download profile**.
+1. Select **User VPN (Point to site)**.
+1. Select **Download virtual Hub User VPN profile**.
- ![Screenshot that shows the button for generating and downloading a profile.](./media/global-hub-profile/download.png)
+ :::image type="content" source="./media/global-hub-profile/hub-profile.png" alt-text="Screenshot that shows how to download a hub profile." lightbox="./media/global-hub-profile/hub-profile.png":::
+
+1. On the **Download virtual WAN user VPN**, select **EAPTLS** as the authentication type.
+1. Select **Generate and download profile**.
+
+ :::image type="content" source="./media/global-hub-profile/generate.png" alt-text="Screenshot that shows generating and downloading a profile." lightbox="./media/global-hub-profile/generate.png":::
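
The hub profile can also be generated from the command line. A hedged Azure CLI sketch, using a placeholder point-to-site gateway name; verify the exact command and parameters against your installed CLI version:

```azurecli
# Generates the VPN client profile package for the hub's P2S gateway and returns a download URL
az network p2s-vpn-gateway vpn-client generate \
  --resource-group MyResourceGroup \
  --name MyHubP2SGateway \
  --authentication-method EAPTLS
```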
## Next steps
virtual-wan Openvpn Azure Ad Client https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-wan/openvpn-azure-ad-client.md
Title: 'VPN Gateway: VPN client for OpenVPN protocol P2S connections: Azure AD authentication' description: Learn how to use P2S VPN to connect to your VNet using Azure AD authentication.-+ Previously updated : 04/26/2021 Last updated : 03/24/2022
This article helps you configure a VPN client to connect to a virtual network us
For every computer that wants to connect to the VNet via the VPN client, you need to download the Azure VPN Client for the computer, and also configure a VPN client profile. If you want to configure multiple computers, you can create a client profile on one computer, export it, and then import it to other computers.
-### To download the Azure VPN Client
+### To download the Azure VPN client
-1. Download the [Azure VPN Client](https://go.microsoft.com/fwlink/?linkid=2117554) to the computer.
-1. Verify that the Azure VPN Client has permission to run in the background. To check and enable permissions, navigate to **Start -> Settings -> Privacy -> Background Apps**.
-
- * Under **Background Apps**, make sure **Let apps run in the background** is turned **On**.
- * Under **Choose which apps can run in the background**, turn settings for **Azure VPN Client** to **On**.
-
- ![Screenshot showing background apps.](./media/openvpn-azure-ad-client/backgroundpermission.png)
### <a name="cert"></a>To create a certificate-based client profile
To force the import, use the **-f** switch.
## Next steps
-For more information, see [Create an Azure Active Directory tenant for P2S Open VPN connections that use Azure AD authentication](openvpn-azure-ad-tenant.md).
+For more information, see [Create an Azure Active Directory tenant for P2S Open VPN connections that use Azure AD authentication](openvpn-azure-ad-tenant.md).
virtual-wan Virtual Wan Faq https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-wan/virtual-wan-faq.md
The Virtual WAN team has been working on upgrading virtual routers from their cu
Note that you'll only be able to update your virtual hub router if all the resources (gateways/route tables/VNET connections) in your hub are in a succeeded state. Additionally, as this operation requires deployment of new VMSS based virtual hub routers, you'll face an expected downtime of 30 minutes per hub. Within a single Virtual WAN resource, hubs should be updated one at a time instead of updating multiple at the same time. When the Router Version says "Latest", then the hub is done updating. There will be no routing behavior changes after this update. If the update fails for any reason, your hub will be auto recovered to the old version to ensure there is still a working setup.
+### Is there a route limit for OpenVPN clients connecting to an Azure P2S VPN gateway?
+
+The route limit for OpenVPN clients is 1000.
+ ## Next steps * For more information about Virtual WAN, see [About Virtual WAN](virtual-wan-about.md).
virtual-wan Virtual Wan Point To Site Azure Ad https://github.com/MicrosoftDocs/azure-docs/commits/main/articles/virtual-wan/virtual-wan-point-to-site-azure-ad.md
In this section, you create a connection between your virtual hub and your VNet.
[!INCLUDE [Connect virtual network](../../includes/virtual-wan-connect-vnet-hub-include.md)]
-## <a name="device"></a>Download User VPN profile
+## <a name="download-profile"></a>Download User VPN profile
All of the necessary configuration settings for the VPN clients are contained in a VPN client configuration zip file. The settings in the zip file help you easily configure the VPN clients. The VPN client configuration files that you generate are specific to the User VPN configuration for your gateway. In this section, you generate and download the files used to configure your VPN clients. [!INCLUDE [Download profile](../../includes/virtual-wan-p2s-download-profile-include.md)]
-## Configure User VPN clients
+## <a name="configure-client"></a>Configure User VPN clients
Each computer that connects must have a client installed. You configure each client by using the VPN User client profile files that you downloaded in the previous steps. Use the article that pertains to the operating system that you want to connect.