diff --git a/.gitignore b/.gitignore index 8a1eb0ae..6738c25b 100644 --- a/.gitignore +++ b/.gitignore @@ -7,9 +7,9 @@ # Generated files .docusaurus .cache-loader -/docs/SDK-API/*.mdx +/docs/APIs-and-SDKs/SDK-API*.mdx /docs/Web-Console-API/*.mdx -/docs/SDK-API/*.js +/docs/APIs-and-SDKs/SDK-API*.js /docs/Web-Console-API/*.js # Misc diff --git a/docs/SDK-API/_category_.json b/docs/APIs-and-SDKs/SDK-API/_category_.json similarity index 100% rename from docs/SDK-API/_category_.json rename to docs/APIs-and-SDKs/SDK-API/_category_.json diff --git a/docs/SDK-Documentation/Advanced/_category_.json b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/_category_.json similarity index 100% rename from docs/SDK-Documentation/Advanced/_category_.json rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/_category_.json diff --git a/docs/SDK-Documentation/Advanced/code-as-a-variant-variable.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/code-as-a-variant-variable.mdx similarity index 98% rename from docs/SDK-Documentation/Advanced/code-as-a-variant-variable.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/code-as-a-variant-variable.mdx index fd1115d2..05cfa562 100644 --- a/docs/SDK-Documentation/Advanced/code-as-a-variant-variable.mdx +++ b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/code-as-a-variant-variable.mdx @@ -2,7 +2,7 @@ import Image from "@theme/IdealImage"; # Code as a Variant Variable -[Variant Variables](/docs/SDK-Documentation/basic-usage#treatment-variables) +[Variant Variables](/docs/APIs-and-SDKs/SDK-Documentation/basic-usage#treatment-variables) are incredibly useful for setting particular values in your experiments, but sometimes it can be useful to run code as a variable instead. 
This section will show you a few examples of how this can be implemented; connecting your diff --git a/docs/SDK-Documentation/Advanced/context-attributes.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes.mdx diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/_setting-context-attributes.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/_setting-context-attributes.mdx similarity index 98% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/_setting-context-attributes.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/_setting-context-attributes.mdx index b2c74a05..cd7bc980 100644 --- a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/_setting-context-attributes.mdx +++ b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/_setting-context-attributes.mdx @@ -34,7 +34,7 @@ context is ready. One of the most useful and commonly used attributes, is the `user_agent` attribute which, when passed to the SDK, will be split up into multiple attributes that can be used to target specific browsers or devices in the Web Console. You can read -more about how this works in [the Segments section of the Dashboard Settings docs](/docs/web-console-docs/settings#segments). +more about how this works in [the Segments section of the Dashboard Settings docs](/docs/web-console-docs/configuration/settings#segments). 
diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/dotnet/setAttributes.cs b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/dotnet/setAttributes.cs similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/dotnet/setAttributes.cs rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/dotnet/setAttributes.cs diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/flutter/setAttributes.dart b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/flutter/setAttributes.dart similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/flutter/setAttributes.dart rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/flutter/setAttributes.dart diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/go/setAttributes.go b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/go/setAttributes.go similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/go/setAttributes.go rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/go/setAttributes.go diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/java/setAttributes.java b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/java/setAttributes.java similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/java/setAttributes.java rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/java/setAttributes.java diff --git 
a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/js/setAttributes.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/js/setAttributes.js similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/js/setAttributes.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/js/setAttributes.js diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/php/setAttributes.php b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/php/setAttributes.php similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/php/setAttributes.php rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/php/setAttributes.php diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/python/setAttributes.py b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/python/setAttributes.py similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/python/setAttributes.py rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/python/setAttributes.py diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributes.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributes.js similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributes.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributes.js diff --git 
a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributesAsProps.tsx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributesAsProps.tsx similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributesAsProps.tsx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/react/setAttributesAsProps.tsx diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/ruby/setAttributes.rb b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/ruby/setAttributes.rb similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/ruby/setAttributes.rb rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/ruby/setAttributes.rb diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/swift/setAttributes.swift b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/swift/setAttributes.swift similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/swift/setAttributes.swift rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/swift/setAttributes.swift diff --git a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/SetAttributes.vue b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/SetAttributes.vue similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/SetAttributes.vue rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/SetAttributes.vue diff --git 
a/docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/setAttributes.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/setAttributes.js similarity index 100% rename from docs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/setAttributes.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes/setting-context-attributes/vue/setAttributes.js diff --git a/docs/SDK-Documentation/Advanced/custom-assignments.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments.mdx diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/_custom-assignments.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/_custom-assignments.mdx similarity index 98% rename from docs/SDK-Documentation/Advanced/custom-assignments/_custom-assignments.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/_custom-assignments.mdx index eb7c8b41..c5c76622 100644 --- a/docs/SDK-Documentation/Advanced/custom-assignments/_custom-assignments.mdx +++ b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/_custom-assignments.mdx @@ -40,7 +40,7 @@ Events with custom assignments are **counted as eligible events** by the ABsmart statistics engines. If you are using these methods for development purposes (to force a particular variant for yourself or somebody else on the team) it is likely that you need the -[`context.override` methods](/docs/SDK-Documentation/basic-usage#overriding-treatment-variants) +[`context.override` methods](/docs/APIs-and-SDKs/SDK-Documentation/basic-usage#overriding-treatment-variants) instead. 
::: diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignment.cs b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignment.cs similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignment.cs rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignment.cs diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignments.cs b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignments.cs similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignments.cs rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/dotnet/customAssignments.cs diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignment.dart b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignment.dart similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignment.dart rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignment.dart diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignments.dart b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignments.dart similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignments.dart rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/flutter/customAssignments.dart diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/go/customAssignment.go b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/go/customAssignment.go similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/go/customAssignment.go rename to 
docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/go/customAssignment.go diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/go/customAssignments.go b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/go/customAssignments.go similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/go/customAssignments.go rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/go/customAssignments.go diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/java/customAssignment.java b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/java/customAssignment.java similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/java/customAssignment.java rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/java/customAssignment.java diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/java/customAssignments.java b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/java/customAssignments.java similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/java/customAssignments.java rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/java/customAssignments.java diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/js/customAssignment.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/js/customAssignment.js similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/js/customAssignment.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/js/customAssignment.js diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/js/customAssignments.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/js/customAssignments.js similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/js/customAssignments.js rename to 
docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/js/customAssignments.js diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/php/customAssignment.php b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/php/customAssignment.php similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/php/customAssignment.php rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/php/customAssignment.php diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/php/customAssignments.php b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/php/customAssignments.php similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/php/customAssignments.php rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/php/customAssignments.php diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/python/customAssignment.py b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/python/customAssignment.py similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/python/customAssignment.py rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/python/customAssignment.py diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/python/customAssignments.py b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/python/customAssignments.py similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/python/customAssignments.py rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/python/customAssignments.py diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/react/customAssignment.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/react/customAssignment.js similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/react/customAssignment.js rename to 
docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/react/customAssignment.js diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/react/customAssignments.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/react/customAssignments.js similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/react/customAssignments.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/react/customAssignments.js diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignment.rb b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignment.rb similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignment.rb rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignment.rb diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignments.rb b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignments.rb similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignments.rb rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/ruby/customAssignments.rb diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignment.swift b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignment.swift similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignment.swift rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignment.swift diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignments.swift b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignments.swift similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignments.swift rename to 
docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/swift/customAssignments.swift diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignment.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignment.js similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignment.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignment.js diff --git a/docs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignments.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignments.js similarity index 100% rename from docs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignments.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments/vue/customAssignments.js diff --git a/docs/SDK-Documentation/Advanced/finalize.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize.mdx diff --git a/docs/SDK-Documentation/Advanced/finalize/_finalize.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/_finalize.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/_finalize.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/_finalize.mdx diff --git a/docs/SDK-Documentation/Advanced/finalize/dotnet/finalize.cs b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/dotnet/finalize.cs similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/dotnet/finalize.cs rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/dotnet/finalize.cs diff --git a/docs/SDK-Documentation/Advanced/finalize/flutter/finalize.dart b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/flutter/finalize.dart similarity index 100% rename from 
docs/SDK-Documentation/Advanced/finalize/flutter/finalize.dart rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/flutter/finalize.dart diff --git a/docs/SDK-Documentation/Advanced/finalize/go/finalize.go b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/go/finalize.go similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/go/finalize.go rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/go/finalize.go diff --git a/docs/SDK-Documentation/Advanced/finalize/java/finalize.java b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/java/finalize.java similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/java/finalize.java rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/java/finalize.java diff --git a/docs/SDK-Documentation/Advanced/finalize/js/finalize.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/js/finalize.js similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/js/finalize.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/js/finalize.js diff --git a/docs/SDK-Documentation/Advanced/finalize/php/finalize.php b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/php/finalize.php similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/php/finalize.php rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/php/finalize.php diff --git a/docs/SDK-Documentation/Advanced/finalize/python/finalize.py b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/python/finalize.py similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/python/finalize.py rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/python/finalize.py diff --git a/docs/SDK-Documentation/Advanced/finalize/react/finalize.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/react/finalize.js similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/react/finalize.js rename to 
docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/react/finalize.js diff --git a/docs/SDK-Documentation/Advanced/finalize/ruby/finalize.rb b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/ruby/finalize.rb similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/ruby/finalize.rb rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/ruby/finalize.rb diff --git a/docs/SDK-Documentation/Advanced/finalize/swift/finalize.swift b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/swift/finalize.swift similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/swift/finalize.swift rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/swift/finalize.swift diff --git a/docs/SDK-Documentation/Advanced/finalize/vue/finalize.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/vue/finalize.js similarity index 100% rename from docs/SDK-Documentation/Advanced/finalize/vue/finalize.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/finalize/vue/finalize.js diff --git a/docs/SDK-Documentation/Advanced/publish.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/publish.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish.mdx diff --git a/docs/SDK-Documentation/Advanced/publish/_publish.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/_publish.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/_publish.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/_publish.mdx diff --git a/docs/SDK-Documentation/Advanced/publish/dotnet/publish.cs b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/dotnet/publish.cs similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/dotnet/publish.cs rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/dotnet/publish.cs diff --git a/docs/SDK-Documentation/Advanced/publish/flutter/publish.dart 
b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/flutter/publish.dart similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/flutter/publish.dart rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/flutter/publish.dart diff --git a/docs/SDK-Documentation/Advanced/publish/go/publish.go b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/go/publish.go similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/go/publish.go rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/go/publish.go diff --git a/docs/SDK-Documentation/Advanced/publish/java/publish.java b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/java/publish.java similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/java/publish.java rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/java/publish.java diff --git a/docs/SDK-Documentation/Advanced/publish/js/publish.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/js/publish.js similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/js/publish.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/js/publish.js diff --git a/docs/SDK-Documentation/Advanced/publish/php/publish.php b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/php/publish.php similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/php/publish.php rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/php/publish.php diff --git a/docs/SDK-Documentation/Advanced/publish/python/publish.py b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/python/publish.py similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/python/publish.py rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/python/publish.py diff --git a/docs/SDK-Documentation/Advanced/publish/react/publish.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/react/publish.js similarity index 100% rename from 
docs/SDK-Documentation/Advanced/publish/react/publish.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/react/publish.js diff --git a/docs/SDK-Documentation/Advanced/publish/ruby/publish.rb b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/ruby/publish.rb similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/ruby/publish.rb rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/ruby/publish.rb diff --git a/docs/SDK-Documentation/Advanced/publish/swift/publish.swift b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/swift/publish.swift similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/swift/publish.swift rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/swift/publish.swift diff --git a/docs/SDK-Documentation/Advanced/publish/vue/publish.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/vue/publish.js similarity index 100% rename from docs/SDK-Documentation/Advanced/publish/vue/publish.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/publish/vue/publish.js diff --git a/docs/SDK-Documentation/Advanced/tracking-goals.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals.mdx diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/_tracking-goals.mdx b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/_tracking-goals.mdx similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/_tracking-goals.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/_tracking-goals.mdx diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/dotnet/trackingGoals.cs b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/dotnet/trackingGoals.cs similarity index 100% rename from 
docs/SDK-Documentation/Advanced/tracking-goals/dotnet/trackingGoals.cs rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/dotnet/trackingGoals.cs diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/flutter/trackingGoals.dart b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/flutter/trackingGoals.dart similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/flutter/trackingGoals.dart rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/flutter/trackingGoals.dart diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/go/trackingGoals.go b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/go/trackingGoals.go similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/go/trackingGoals.go rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/go/trackingGoals.go diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/java/trackingGoals.java b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/java/trackingGoals.java similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/java/trackingGoals.java rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/java/trackingGoals.java diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/js/trackingGoals.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/js/trackingGoals.js similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/js/trackingGoals.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/js/trackingGoals.js diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/php/trackingGoals.php b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/php/trackingGoals.php similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/php/trackingGoals.php rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/php/trackingGoals.php diff 
--git a/docs/SDK-Documentation/Advanced/tracking-goals/python/trackingGoals.py b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/python/trackingGoals.py similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/python/trackingGoals.py rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/python/trackingGoals.py diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/react/trackingGoals.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/react/trackingGoals.js similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/react/trackingGoals.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/react/trackingGoals.js diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/ruby/trackingGoals.rb b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/ruby/trackingGoals.rb similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/ruby/trackingGoals.rb rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/ruby/trackingGoals.rb diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/swift/trackingGoals.swift b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/swift/trackingGoals.swift similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/swift/trackingGoals.swift rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/swift/trackingGoals.swift diff --git a/docs/SDK-Documentation/Advanced/tracking-goals/vue/trackingGoals.js b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/vue/trackingGoals.js similarity index 100% rename from docs/SDK-Documentation/Advanced/tracking-goals/vue/trackingGoals.js rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/tracking-goals/vue/trackingGoals.js diff --git a/docs/SDK-Documentation/Advanced/using-custom-fields-in-your-code.mdx 
b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/using-custom-fields-in-your-code.mdx similarity index 90% rename from docs/SDK-Documentation/Advanced/using-custom-fields-in-your-code.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/Advanced/using-custom-fields-in-your-code.mdx index 258d58cf..866253c2 100644 --- a/docs/SDK-Documentation/Advanced/using-custom-fields-in-your-code.mdx +++ b/docs/APIs-and-SDKs/SDK-Documentation/Advanced/using-custom-fields-in-your-code.mdx @@ -1,6 +1,6 @@ # Custom Fields in Code -In the [Platform Settings on the Web Console](/docs/web-console-docs/settings#platform-settings), you can create +In the [Platform Settings on the Web Console](/docs/web-console-docs/configuration/settings#platform-settings), you can create custom fields that are available to the SDKs. These custom fields can be used to add additional logic to your A/B experiments. @@ -18,7 +18,7 @@ Let's say you wish to override an experiment's allocation to a specific variant ### Creating a Custom Field -Checkout the [docs on creating a custom field](/docs/web-console-docs/settings#custom-fields) for more information on +Checkout the [docs on creating a custom field](/docs/web-console-docs/configuration/settings#custom-fields) for more information on adding a field to your experiment creation form, but for this example, we'll create a field with the following properties: - **Name**: `Developers' variant` @@ -31,7 +31,7 @@ the following properties: - **Section**: `Description` :::note -You may wish to [create a custom section](/docs/web-console-docs/settings#custom-fields) for this field. +You may wish to [create a custom section](/docs/web-console-docs/configuration/settings#custom-fields) for this field. ::: ### Using the Custom Field @@ -64,7 +64,7 @@ edited on the Web Console to change the variant that developers see. :::caution Overriding an experiment allocation will not count the user towards the experiment data. 
-To learn more about overrides, have a look at the [overrides SDK documentation](/docs/SDK-Documentation/basic-usage#overriding-treatment-variants). +To learn more about overrides, have a look at the [overrides SDK documentation](/docs/APIs-and-SDKs/SDK-Documentation/basic-usage#overriding-treatment-variants). ::: ## Conclusion diff --git a/docs/SDK-Documentation/_category_.json b/docs/APIs-and-SDKs/SDK-Documentation/_category_.json similarity index 100% rename from docs/SDK-Documentation/_category_.json rename to docs/APIs-and-SDKs/SDK-Documentation/_category_.json diff --git a/docs/SDK-Documentation/basic-usage.mdx b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage.mdx similarity index 100% rename from docs/SDK-Documentation/basic-usage.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage.mdx diff --git a/docs/SDK-Documentation/basic-usage/config-api/_config-api.mdx b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/_config-api.mdx similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/_config-api.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/_config-api.mdx diff --git a/docs/SDK-Documentation/basic-usage/config-api/js/configChangeParameters.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/configChangeParameters.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/js/configChangeParameters.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/configChangeParameters.js diff --git a/docs/SDK-Documentation/basic-usage/config-api/js/getVariantVariableKeys.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/getVariantVariableKeys.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/js/getVariantVariableKeys.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/getVariantVariableKeys.js diff --git 
a/docs/SDK-Documentation/basic-usage/config-api/js/languagesConfig.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/languagesConfig.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/js/languagesConfig.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/languagesConfig.js diff --git a/docs/SDK-Documentation/basic-usage/config-api/js/mergeConfig.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/mergeConfig.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/js/mergeConfig.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/mergeConfig.js diff --git a/docs/SDK-Documentation/basic-usage/config-api/js/mergeTranslationConfigs.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/mergeTranslationConfigs.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/js/mergeTranslationConfigs.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/mergeTranslationConfigs.js diff --git a/docs/SDK-Documentation/basic-usage/config-api/js/returnedConfig.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/returnedConfig.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/config-api/js/returnedConfig.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/config-api/js/returnedConfig.js diff --git a/docs/SDK-Documentation/basic-usage/overriding/_overriding.mdx b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/_overriding.mdx similarity index 98% rename from docs/SDK-Documentation/basic-usage/overriding/_overriding.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/_overriding.mdx index 0f767ee0..e561e3e3 100644 --- a/docs/SDK-Documentation/basic-usage/overriding/_overriding.mdx +++ b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/_overriding.mdx @@ -28,7 +28,7 @@ import FlutterOverriding 
from "!!raw-loader!./flutter/override.dart"; :::warning Warning Overriden events are typed as ineligible and are **ignored** by the ABsmartly statistics engines. If you want to force a particular experiment's variant and have the event be counted, you -can use the [`customAssignment` methods](/docs/SDK-Documentation/Advanced/custom-assignments) +can use the [`customAssignment` methods](/docs/APIs-and-SDKs/SDK-Documentation/Advanced/custom-assignments) instead, although this **is not recommended**. ::: diff --git a/docs/SDK-Documentation/basic-usage/overriding/dotnet/override.cs b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/dotnet/override.cs similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/dotnet/override.cs rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/dotnet/override.cs diff --git a/docs/SDK-Documentation/basic-usage/overriding/flutter/override.dart b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/flutter/override.dart similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/flutter/override.dart rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/flutter/override.dart diff --git a/docs/SDK-Documentation/basic-usage/overriding/go/override.go b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/go/override.go similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/go/override.go rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/go/override.go diff --git a/docs/SDK-Documentation/basic-usage/overriding/java/override.java b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/java/override.java similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/java/override.java rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/java/override.java diff --git a/docs/SDK-Documentation/basic-usage/overriding/js/override.js 
b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/js/override.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/js/override.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/js/override.js diff --git a/docs/SDK-Documentation/basic-usage/overriding/php/override.php b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/php/override.php similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/php/override.php rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/php/override.php diff --git a/docs/SDK-Documentation/basic-usage/overriding/python/override.py b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/python/override.py similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/python/override.py rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/python/override.py diff --git a/docs/SDK-Documentation/basic-usage/overriding/react/override.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/react/override.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/react/override.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/react/override.js diff --git a/docs/SDK-Documentation/basic-usage/overriding/ruby/override.rb b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/ruby/override.rb similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/ruby/override.rb rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/ruby/override.rb diff --git a/docs/SDK-Documentation/basic-usage/overriding/swift/override.swift b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/swift/override.swift similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/swift/override.swift rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/swift/override.swift diff --git 
a/docs/SDK-Documentation/basic-usage/overriding/vue/inInitialization.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/vue/inInitialization.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/vue/inInitialization.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/vue/inInitialization.js diff --git a/docs/SDK-Documentation/basic-usage/overriding/vue/withOverrideMethods.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/vue/withOverrideMethods.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/overriding/vue/withOverrideMethods.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/overriding/vue/withOverrideMethods.js diff --git a/docs/SDK-Documentation/basic-usage/peeking/_peeking.mdx b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/_peeking.mdx similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/_peeking.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/_peeking.mdx diff --git a/docs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariables.cs b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariables.cs similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariables.cs rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariables.cs diff --git a/docs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariants.cs b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariants.cs similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariants.cs rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/dotnet/peekAtVariants.cs diff --git a/docs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariables.dart b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariables.dart similarity index 100% rename from 
docs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariables.dart rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariables.dart diff --git a/docs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariants.dart b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariants.dart similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariants.dart rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/flutter/peekAtVariants.dart diff --git a/docs/SDK-Documentation/basic-usage/peeking/go/peekAtVariables.go b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/go/peekAtVariables.go similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/go/peekAtVariables.go rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/go/peekAtVariables.go diff --git a/docs/SDK-Documentation/basic-usage/peeking/go/peekAtVariants.go b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/go/peekAtVariants.go similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/go/peekAtVariants.go rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/go/peekAtVariants.go diff --git a/docs/SDK-Documentation/basic-usage/peeking/java/peekAtVariables.java b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/java/peekAtVariables.java similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/java/peekAtVariables.java rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/java/peekAtVariables.java diff --git a/docs/SDK-Documentation/basic-usage/peeking/java/peekAtVariants.java b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/java/peekAtVariants.java similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/java/peekAtVariants.java rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/java/peekAtVariants.java diff --git 
a/docs/SDK-Documentation/basic-usage/peeking/js/peekAtVariables.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/js/peekAtVariables.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/js/peekAtVariables.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/js/peekAtVariables.js diff --git a/docs/SDK-Documentation/basic-usage/peeking/js/peekAtVariant.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/js/peekAtVariant.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/js/peekAtVariant.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/js/peekAtVariant.js diff --git a/docs/SDK-Documentation/basic-usage/peeking/php/peekAtVariable.php b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/php/peekAtVariable.php similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/php/peekAtVariable.php rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/php/peekAtVariable.php diff --git a/docs/SDK-Documentation/basic-usage/peeking/php/peekAtVariant.php b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/php/peekAtVariant.php similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/php/peekAtVariant.php rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/php/peekAtVariant.php diff --git a/docs/SDK-Documentation/basic-usage/peeking/python/peekAtVariables.py b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/python/peekAtVariables.py similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/python/peekAtVariables.py rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/python/peekAtVariables.py diff --git a/docs/SDK-Documentation/basic-usage/peeking/python/peekAtVariant.py b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/python/peekAtVariant.py similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/python/peekAtVariant.py 
rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/python/peekAtVariant.py diff --git a/docs/SDK-Documentation/basic-usage/peeking/react/peekAtVariables.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/react/peekAtVariables.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/react/peekAtVariables.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/react/peekAtVariables.js diff --git a/docs/SDK-Documentation/basic-usage/peeking/react/peekAtVariant.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/react/peekAtVariant.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/react/peekAtVariant.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/react/peekAtVariant.js diff --git a/docs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariables.rb b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariables.rb similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariables.rb rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariables.rb diff --git a/docs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariants.rb b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariants.rb similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariants.rb rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/ruby/peekAtVariants.rb diff --git a/docs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariables.swift b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariables.swift similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariables.swift rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariables.swift diff --git a/docs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariants.swift 
b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariants.swift similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariants.swift rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/swift/peekAtVariants.swift diff --git a/docs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariables.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariables.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariables.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariables.js diff --git a/docs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariant.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariant.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariant.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/peeking/vue/peekAtVariant.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/_selecting-a-treatment.mdx b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/_selecting-a-treatment.mdx similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/_selecting-a-treatment.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/_selecting-a-treatment.mdx diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/dotnet/selectingATreatment.cs b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/dotnet/selectingATreatment.cs similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/dotnet/selectingATreatment.cs rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/dotnet/selectingATreatment.cs diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/flutter/selectingATreatment.dart 
b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/flutter/selectingATreatment.dart similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/flutter/selectingATreatment.dart rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/flutter/selectingATreatment.dart diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/go/selectingATreatment.go b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/go/selectingATreatment.go similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/go/selectingATreatment.go rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/go/selectingATreatment.go diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/java/selectingATreatment.java b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/java/selectingATreatment.java similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/java/selectingATreatment.java rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/java/selectingATreatment.java diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/js/selectingATreatment.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/js/selectingATreatment.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/js/selectingATreatment.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/js/selectingATreatment.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/php/selectingATreatment.php b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/php/selectingATreatment.php similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/php/selectingATreatment.php rename to 
docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/php/selectingATreatment.php diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/python/selectingATreatment.py b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/python/selectingATreatment.py similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/python/selectingATreatment.py rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/python/selectingATreatment.py diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/react/selectingATreatment.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/react/selectingATreatment.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/react/selectingATreatment.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/react/selectingATreatment.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingTernaryOperator.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingTernaryOperator.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingTernaryOperator.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingTernaryOperator.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingUseTreatment.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingUseTreatment.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingUseTreatment.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/react/usingUseTreatment.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/ruby/selectingATreatment.rb 
b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/ruby/selectingATreatment.rb similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/ruby/selectingATreatment.rb rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/ruby/selectingATreatment.rb diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/swift/selectingATreatment.swift b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/swift/selectingATreatment.swift similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/swift/selectingATreatment.swift rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/swift/selectingATreatment.swift diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingOnlyDefaultSlot.vue b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingOnlyDefaultSlot.vue similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingOnlyDefaultSlot.vue rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingOnlyDefaultSlot.vue diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentAlias.vue b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentAlias.vue similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentAlias.vue rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentAlias.vue diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentIndex.vue b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentIndex.vue similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentIndex.vue rename to 
docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-treatment/vue/UsingTreatmentIndex.vue diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/_selecting-a-variable.mdx b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/_selecting-a-variable.mdx similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/_selecting-a-variable.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/_selecting-a-variable.mdx diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/dotnet/selectingAVariable.cs b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/dotnet/selectingAVariable.cs similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/dotnet/selectingAVariable.cs rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/dotnet/selectingAVariable.cs diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/flutter/selectingAVariable.dart b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/flutter/selectingAVariable.dart similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/flutter/selectingAVariable.dart rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/flutter/selectingAVariable.dart diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/go/selectingAVariable.go b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/go/selectingAVariable.go similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/go/selectingAVariable.go rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/go/selectingAVariable.go diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/java/selectingAVariable.java b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/java/selectingAVariable.java 
similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/java/selectingAVariable.java rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/java/selectingAVariable.java diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/js/selectingAVariable.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/js/selectingAVariable.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/js/selectingAVariable.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/js/selectingAVariable.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/php/selectingAVariable.php b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/php/selectingAVariable.php similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/php/selectingAVariable.php rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/php/selectingAVariable.php diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/python/selectingAVariable.py b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/python/selectingAVariable.py similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/python/selectingAVariable.py rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/python/selectingAVariable.py diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/react/directlyInTreatmentComponent.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/react/directlyInTreatmentComponent.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/react/directlyInTreatmentComponent.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/react/directlyInTreatmentComponent.js diff --git 
a/docs/SDK-Documentation/basic-usage/selecting-a-variable/react/inUseEffect.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/react/inUseEffect.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/react/inUseEffect.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/react/inUseEffect.js diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/ruby/selectingAVariable.rb b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/ruby/selectingAVariable.rb similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/ruby/selectingAVariable.rb rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/ruby/selectingAVariable.rb diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/swift/selectingAVariable.swift b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/swift/selectingAVariable.swift similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/swift/selectingAVariable.swift rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/swift/selectingAVariable.swift diff --git a/docs/SDK-Documentation/basic-usage/selecting-a-variable/vue/selectingAVariable.js b/docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/vue/selectingAVariable.js similarity index 100% rename from docs/SDK-Documentation/basic-usage/selecting-a-variable/vue/selectingAVariable.js rename to docs/APIs-and-SDKs/SDK-Documentation/basic-usage/selecting-a-variable/vue/selectingAVariable.js diff --git a/docs/SDK-Documentation/getting-started.mdx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started.mdx similarity index 100% rename from docs/SDK-Documentation/getting-started.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started.mdx diff --git 
a/docs/SDK-Documentation/getting-started/create-new-context-request/_create-new-context-request.mdx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/_create-new-context-request.mdx similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/_create-new-context-request.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/_create-new-context-request.mdx diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/async.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/async.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/async.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/async.cs diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/extraUnits.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/extraUnits.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/extraUnits.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/extraUnits.cs diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/prefetchedData.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/prefetchedData.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/prefetchedData.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/prefetchedData.cs diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithFreshData.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithFreshData.cs 
similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithFreshData.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithFreshData.cs diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithRefreshMethod.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithRefreshMethod.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithRefreshMethod.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/refreshWithRefreshMethod.cs diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/sync.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/sync.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/dotnet/sync.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/dotnet/sync.cs diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/flutter/async.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/async.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/flutter/async.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/async.dart diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/flutter/extraUnits.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/extraUnits.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/flutter/extraUnits.dart rename to 
docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/extraUnits.dart diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithFreshData.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithFreshData.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithFreshData.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithFreshData.dart diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithRefresh.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithRefresh.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithRefresh.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/refreshWithRefresh.dart diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/flutter/sync.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/sync.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/flutter/sync.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/sync.dart diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/flutter/withPrefetchedData.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/withPrefetchedData.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/flutter/withPrefetchedData.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/flutter/withPrefetchedData.dart diff --git 
a/docs/SDK-Documentation/getting-started/create-new-context-request/go/async.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/async.go similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/go/async.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/async.go diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/go/extraUnits.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/extraUnits.go similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/go/extraUnits.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/extraUnits.go diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithFreshData.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithFreshData.go similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithFreshData.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithFreshData.go diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithRefreshMethod.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithRefreshMethod.go similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithRefreshMethod.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/refreshWithRefreshMethod.go diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/go/sync.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/sync.go similarity index 100% rename from 
docs/SDK-Documentation/getting-started/create-new-context-request/go/sync.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/go/sync.go diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/java/async.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/async.java similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/java/async.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/async.java diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/java/extraUnits.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/extraUnits.java similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/java/extraUnits.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/extraUnits.java diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/java/preFetchedData.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/preFetchedData.java similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/java/preFetchedData.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/preFetchedData.java diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithFreshData.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithFreshData.java similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithFreshData.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithFreshData.java diff --git 
a/docs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithRefreshMethod.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithRefreshMethod.java similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithRefreshMethod.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/refreshWithRefreshMethod.java diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/java/sync.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/sync.java similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/java/sync.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/java/sync.java diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestCancellation.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestCancellation.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestCancellation.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestCancellation.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestTimeout.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestTimeout.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestTimeout.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/httpRequestTimeout.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithFreshData.js 
b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithFreshData.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithFreshData.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithFreshData.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithRefreshMethod.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithRefreshMethod.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithRefreshMethod.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/refreshWithRefreshMethod.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/usingAsyncAwait.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/usingAsyncAwait.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/js/usingAsyncAwait.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/usingAsyncAwait.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/usingRawPromises.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/usingRawPromises.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/js/usingRawPromises.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/usingRawPromises.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/js/withPreFetchedData.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/withPreFetchedData.js similarity index 100% rename from 
docs/SDK-Documentation/getting-started/create-new-context-request/js/withPreFetchedData.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/js/withPreFetchedData.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/php/extraUnits.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/extraUnits.php similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/php/extraUnits.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/extraUnits.php diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/php/prefetchedData.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/prefetchedData.php similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/php/prefetchedData.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/prefetchedData.php diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/php/refresh.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/refresh.php similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/php/refresh.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/refresh.php diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/php/sync.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/sync.php similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/php/sync.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/php/sync.php diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/python/async.py 
b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/async.py similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/python/async.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/async.py diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/python/extraUnits.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/extraUnits.py similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/python/extraUnits.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/extraUnits.py diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithFreshData.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithFreshData.py similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithFreshData.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithFreshData.py diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithRefreshMethod.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithRefreshMethod.py similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithRefreshMethod.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/refreshWithRefreshMethod.py diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/python/sync.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/sync.py similarity index 100% rename from 
docs/SDK-Documentation/getting-started/create-new-context-request/python/sync.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/sync.py diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/python/withPrefetchedData.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/withPrefetchedData.py similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/python/withPrefetchedData.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/python/withPrefetchedData.py diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/react/forTernaryOperator.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/react/forTernaryOperator.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/react/forTernaryOperator.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/react/forTernaryOperator.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/react/newContext.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/react/newContext.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/react/newContext.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/react/newContext.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/react/withABSmartly.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/react/withABSmartly.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/react/withABSmartly.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/react/withABSmartly.js diff 
--git a/docs/SDK-Documentation/getting-started/create-new-context-request/ruby/createContext.rb b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/createContext.rb similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/ruby/createContext.rb rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/createContext.rb diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnits.rb b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnits.rb similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnits.rb rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnits.rb diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnitsAlt.rb b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnitsAlt.rb similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnitsAlt.rb rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/extraUnitsAlt.rb diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/ruby/prefetchedData.rb b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/prefetchedData.rb similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/ruby/prefetchedData.rb rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/ruby/prefetchedData.rb diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/swift/newContext.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/newContext.swift similarity index 100% rename from 
docs/SDK-Documentation/getting-started/create-new-context-request/swift/newContext.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/newContext.swift diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithFreshData.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithFreshData.swift similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithFreshData.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithFreshData.swift diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithRefreshMethod.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithRefreshMethod.swift similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithRefreshMethod.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/refreshWithRefreshMethod.swift diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/swift/settingExtraUnits.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/settingExtraUnits.swift similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/swift/settingExtraUnits.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/settingExtraUnits.swift diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/swift/withPreFetchedData.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/withPreFetchedData.swift similarity index 100% rename from 
docs/SDK-Documentation/getting-started/create-new-context-request/swift/withPreFetchedData.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/swift/withPreFetchedData.swift diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue/RefreshWithFreshData.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue/RefreshWithFreshData.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue/RefreshWithFreshData.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue/RefreshWithFreshData.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue/initializeWithPrefetchedData.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue/initializeWithPrefetchedData.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue/initializeWithPrefetchedData.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue/initializeWithPrefetchedData.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue/refreshWithRefreshMethod.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue/refreshWithRefreshMethod.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue/refreshWithRefreshMethod.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue/refreshWithRefreshMethod.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue3/manualRefresh.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/manualRefresh.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue3/manualRefresh.js rename to 
docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/manualRefresh.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue3/prefetched.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/prefetched.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue3/prefetched.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/prefetched.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue3/refresh.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/refresh.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue3/refresh.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/refresh.js diff --git a/docs/SDK-Documentation/getting-started/create-new-context-request/vue3/sync.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/sync.js similarity index 100% rename from docs/SDK-Documentation/getting-started/create-new-context-request/vue3/sync.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/create-new-context-request/vue3/sync.js diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/_custom-event-logger.mdx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/_custom-event-logger.mdx similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/_custom-event-logger.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/_custom-event-logger.mdx diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLogger.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLogger.cs similarity index 100% 
rename from docs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLogger.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLogger.cs diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLoggerInjection.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLoggerInjection.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLoggerInjection.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/dotnet/customEventLoggerInjection.cs diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/flutter/customEventLogger.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/flutter/customEventLogger.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/flutter/customEventLogger.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/flutter/customEventLogger.dart diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/flutter/usage.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/flutter/usage.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/flutter/usage.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/flutter/usage.dart diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/go/customEventLogger.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/go/customEventLogger.go similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/go/customEventLogger.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/go/customEventLogger.go diff --git 
a/docs/SDK-Documentation/getting-started/custom-event-logger/java/customEventLogger.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/java/customEventLogger.java similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/java/customEventLogger.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/java/customEventLogger.java diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/js/customEventLogger.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/js/customEventLogger.js similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/js/customEventLogger.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/js/customEventLogger.js diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/php/customEventLogger.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/php/customEventLogger.php similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/php/customEventLogger.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/php/customEventLogger.php diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/php/handleEvent.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/php/handleEvent.php similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/php/handleEvent.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/php/handleEvent.php diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/python/customEventLogger.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/python/customEventLogger.py similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/python/customEventLogger.py 
rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/python/customEventLogger.py diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/react/customEventLogger.jsx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/react/customEventLogger.jsx similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/react/customEventLogger.jsx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/react/customEventLogger.jsx diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/swift/customEventLogger.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/swift/customEventLogger.swift similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/swift/customEventLogger.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/swift/customEventLogger.swift diff --git a/docs/SDK-Documentation/getting-started/custom-event-logger/vue/customEventLogger.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/vue/customEventLogger.js similarity index 100% rename from docs/SDK-Documentation/getting-started/custom-event-logger/vue/customEventLogger.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/custom-event-logger/vue/customEventLogger.js diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/_import-and-initialize.mdx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/_import-and-initialize.mdx similarity index 99% rename from docs/SDK-Documentation/getting-started/import-and-initialize/_import-and-initialize.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/_import-and-initialize.mdx index 876669ea..defb8f01 100644 --- a/docs/SDK-Documentation/getting-started/import-and-initialize/_import-and-initialize.mdx +++ 
b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/_import-and-initialize.mdx @@ -34,9 +34,9 @@ import FlutterImport from "!!raw-loader!./flutter/import.dart"; Once the SDK is installed, it can be initialized in your project. :::info -The following examples assume that an [Api Key with SDK permissions](/docs/web-console-docs/settings#api-keys), -an [Application](/docs/web-console-docs/tutorial#2-setting-your-applications), and an -[Environment](/docs/web-console-docs/settings#environments) have been created in the ABsmartly Web Console. +The following examples assume that an [Api Key with SDK permissions](/docs/web-console-docs/configuration/settings#api-keys), +an [Application](/docs/web-console-docs/Configuration/Applications), and an +[Environment](/docs/web-console-docs/configuration/settings#environments) have been created in the ABsmartly Web Console. ::: diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/dotnet/manualSDKInstance.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/dotnet/manualSDKInstance.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/dotnet/manualSDKInstance.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/dotnet/manualSDKInstance.cs diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/dotnet/sdkInjection.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/dotnet/sdkInjection.cs similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/dotnet/sdkInjection.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/dotnet/sdkInjection.cs diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/dotnet/startupCode.cs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/dotnet/startupCode.cs similarity index 100% rename from 
docs/SDK-Documentation/getting-started/import-and-initialize/dotnet/startupCode.cs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/dotnet/startupCode.cs diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/flutter/import.dart b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/flutter/import.dart similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/flutter/import.dart rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/flutter/import.dart diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/go/initialize.go b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/go/initialize.go similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/go/initialize.go rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/go/initialize.go diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/java/import.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/java/import.java similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/java/import.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/java/import.java diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/java/importForAndroid6.java b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/java/importForAndroid6.java similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/java/importForAndroid6.java rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/java/importForAndroid6.java diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/js/import.js 
b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/js/import.js similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/js/import.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/js/import.js diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/php/granularChoice.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/php/granularChoice.php similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/php/granularChoice.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/php/granularChoice.php diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/php/importAndInitialize.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/php/importAndInitialize.php similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/php/importAndInitialize.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/php/importAndInitialize.php diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/php/paramsInOrder.php b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/php/paramsInOrder.php similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/php/paramsInOrder.php rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/php/paramsInOrder.php diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/python/import.py b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/python/import.py similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/python/import.py rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/python/import.py diff --git 
a/docs/SDK-Documentation/getting-started/import-and-initialize/react/import.jsx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/react/import.jsx similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/react/import.jsx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/react/import.jsx diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/ruby/import.rb b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/ruby/import.rb similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/ruby/import.rb rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/ruby/import.rb diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/swift/import.swift b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/swift/import.swift similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/swift/import.swift rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/swift/import.swift diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/vue/import.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/vue/import.js similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/vue/import.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/vue/import.js diff --git a/docs/SDK-Documentation/getting-started/import-and-initialize/vue3/import.js b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/vue3/import.js similarity index 100% rename from docs/SDK-Documentation/getting-started/import-and-initialize/vue3/import.js rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/import-and-initialize/vue3/import.js diff --git 
a/docs/SDK-Documentation/getting-started/install/_install.mdx b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/_install.mdx similarity index 100% rename from docs/SDK-Documentation/getting-started/install/_install.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/_install.mdx diff --git a/docs/SDK-Documentation/getting-started/install/dotnet/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/dotnet/install.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/dotnet/install.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/dotnet/install.bash diff --git a/docs/SDK-Documentation/getting-started/install/flutter/install.yaml b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/flutter/install.yaml similarity index 100% rename from docs/SDK-Documentation/getting-started/install/flutter/install.yaml rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/flutter/install.yaml diff --git a/docs/SDK-Documentation/getting-started/install/go/dependencies.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/go/dependencies.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/go/dependencies.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/go/dependencies.bash diff --git a/docs/SDK-Documentation/getting-started/install/go/go.mod b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/go/go.mod similarity index 100% rename from docs/SDK-Documentation/getting-started/install/go/go.mod rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/go/go.mod diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/checksums/checksums.lock b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/checksums/checksums.lock similarity index 100% rename from 
docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/checksums/checksums.lock rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/checksums/checksums.lock diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/dependencies-accessors.lock b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/dependencies-accessors.lock similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/dependencies-accessors.lock rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/dependencies-accessors.lock diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/gc.properties b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/gc.properties similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/gc.properties rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/dependencies-accessors/gc.properties diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileChanges/last-build.bin b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileChanges/last-build.bin similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileChanges/last-build.bin rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileChanges/last-build.bin diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileHashes/fileHashes.lock b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileHashes/fileHashes.lock similarity index 100% rename from 
docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileHashes/fileHashes.lock rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/fileHashes/fileHashes.lock diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/gc.properties b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/gc.properties similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/gc.properties rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/7.4.2/gc.properties diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/buildOutputCleanup.lock b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/buildOutputCleanup.lock similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/buildOutputCleanup.lock rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/buildOutputCleanup.lock diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/cache.properties b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/cache.properties similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/cache.properties rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/buildOutputCleanup/cache.properties diff --git a/docs/SDK-Documentation/getting-started/install/java/.gradle/vcs-1/gc.properties b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/vcs-1/gc.properties similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.gradle/vcs-1/gc.properties rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.gradle/vcs-1/gc.properties diff 
--git a/docs/SDK-Documentation/getting-started/install/java/.project b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.project similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.project rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.project diff --git a/docs/SDK-Documentation/getting-started/install/java/.settings/org.eclipse.buildship.core.prefs b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.settings/org.eclipse.buildship.core.prefs similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/.settings/org.eclipse.buildship.core.prefs rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/.settings/org.eclipse.buildship.core.prefs diff --git a/docs/SDK-Documentation/getting-started/install/java/build.gradle b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/build.gradle similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/build.gradle rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/build.gradle diff --git a/docs/SDK-Documentation/getting-started/install/java/pom.xml b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/pom.xml similarity index 100% rename from docs/SDK-Documentation/getting-started/install/java/pom.xml rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/java/pom.xml diff --git a/docs/SDK-Documentation/getting-started/install/js/directImport.html b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/js/directImport.html similarity index 100% rename from docs/SDK-Documentation/getting-started/install/js/directImport.html rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/js/directImport.html diff --git a/docs/SDK-Documentation/getting-started/install/js/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/js/install.bash similarity index 
100% rename from docs/SDK-Documentation/getting-started/install/js/install.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/js/install.bash diff --git a/docs/SDK-Documentation/getting-started/install/php/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/php/install.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/php/install.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/php/install.bash diff --git a/docs/SDK-Documentation/getting-started/install/python/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/python/install.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/python/install.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/python/install.bash diff --git a/docs/SDK-Documentation/getting-started/install/react/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/react/install.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/react/install.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/react/install.bash diff --git a/docs/SDK-Documentation/getting-started/install/ruby/bundler.sh b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/ruby/bundler.sh similarity index 100% rename from docs/SDK-Documentation/getting-started/install/ruby/bundler.sh rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/ruby/bundler.sh diff --git a/docs/SDK-Documentation/getting-started/install/ruby/gem.sh b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/ruby/gem.sh similarity index 100% rename from docs/SDK-Documentation/getting-started/install/ruby/gem.sh rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/ruby/gem.sh diff --git a/docs/SDK-Documentation/getting-started/install/swift/Podfile 
b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/swift/Podfile similarity index 100% rename from docs/SDK-Documentation/getting-started/install/swift/Podfile rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/swift/Podfile diff --git a/docs/SDK-Documentation/getting-started/install/swift/updateXcode.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/swift/updateXcode.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/swift/updateXcode.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/swift/updateXcode.bash diff --git a/docs/SDK-Documentation/getting-started/install/vue/directImport.html b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue/directImport.html similarity index 100% rename from docs/SDK-Documentation/getting-started/install/vue/directImport.html rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue/directImport.html diff --git a/docs/SDK-Documentation/getting-started/install/vue/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue/install.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/vue/install.bash rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue/install.bash diff --git a/docs/SDK-Documentation/getting-started/install/vue3/directImport.html b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue3/directImport.html similarity index 100% rename from docs/SDK-Documentation/getting-started/install/vue3/directImport.html rename to docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue3/directImport.html diff --git a/docs/SDK-Documentation/getting-started/install/vue3/install.bash b/docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue3/install.bash similarity index 100% rename from docs/SDK-Documentation/getting-started/install/vue3/install.bash rename to 
docs/APIs-and-SDKs/SDK-Documentation/getting-started/install/vue3/install.bash diff --git a/docs/SDK-Documentation/index.mdx b/docs/APIs-and-SDKs/SDK-Documentation/index.mdx similarity index 100% rename from docs/SDK-Documentation/index.mdx rename to docs/APIs-and-SDKs/SDK-Documentation/index.mdx diff --git a/docs/Examples/Slack-Integration/index.mdx b/docs/APIs-and-SDKs/Web-Console-API/Examples/Slack-Integration/index.mdx similarity index 88% rename from docs/Examples/Slack-Integration/index.mdx rename to docs/APIs-and-SDKs/Web-Console-API/Examples/Slack-Integration/index.mdx index fb3731ce..b7be5e94 100644 --- a/docs/Examples/Slack-Integration/index.mdx +++ b/docs/APIs-and-SDKs/Web-Console-API/Examples/Slack-Integration/index.mdx @@ -191,17 +191,17 @@ We are going to make sure our webhook is `Enabled` and `Ordered`; we will give i and we will choose to send events whenever any experiment event occurs. This includes: -- [ExperimentCreated](/docs/web-console-docs/settings#experiment-created) -- [ExperimentDevelopment](/docs/web-console-docs/settings#experiment-started-in-development) -- [ExperimentStarted](/docs/web-console-docs/settings#experiment-started) -- [ExperimentStopped](/docs/web-console-docs/settings#experiment-stopped) -- [ExperimentRestarted](/docs/web-console-docs/settings#experiment-restarted) -- [ExperimentFullOn](/docs/web-console-docs/settings#experiment-put-full-on) -- [ExperimentEdited](/docs/web-console-docs/settings#experiment-edited) -- [ExperimentCommented](/docs/web-console-docs/settings#experiment-commented-on) -- [ExperimentArchived](/docs/web-console-docs/settings#experiment-archived) -- [ExperimentUnarchived](/docs/web-console-docs/settings#experiment-unarchived) -- [ExperimentAlertCreated](/docs/web-console-docs/settings#experiment-alert-created) +- [ExperimentCreated](/docs/web-console-docs/configuration/settings#experiment-created) +- 
[ExperimentDevelopment](/docs/web-console-docs/configuration/settings#experiment-started-in-development) +- [ExperimentStarted](/docs/web-console-docs/configuration/settings#experiment-started) +- [ExperimentStopped](/docs/web-console-docs/configuration/settings#experiment-stopped) +- [ExperimentRestarted](/docs/web-console-docs/configuration/settings#experiment-restarted) +- [ExperimentFullOn](/docs/web-console-docs/configuration/settings#experiment-put-full-on) +- [ExperimentEdited](/docs/web-console-docs/configuration/settings#experiment-edited) +- [ExperimentCommented](/docs/web-console-docs/configuration/settings#experiment-commented-on) +- [ExperimentArchived](/docs/web-console-docs/configuration/settings#experiment-archived) +- [ExperimentUnarchived](/docs/web-console-docs/configuration/settings#experiment-unarchived) +- [ExperimentAlertCreated](/docs/web-console-docs/configuration/settings#experiment-alert-created) Click `Create Webhook` and we should start receiving events in our express app! @@ -246,7 +246,7 @@ app.listen(PORT, () => { Now, whenever an experiment is changed or has an alert created, that event will be sent as a message to your Slack channel! -For further formatting of the messages, checkout the [various Webhook Payloads](/docs/web-console-docs/settings#payloads) that ABsmartly can send, and +For further formatting of the messages, checkout the [various Webhook Payloads](/docs/web-console-docs/configuration/settings#payloads) that ABsmartly can send, and have a look at Slack's [Message Formatting](https://api.slack.com/messaging/composing) documentation. 
Our final `index.ts` file should look like this: diff --git a/docs/APIs-and-SDKs/Web-Console-API/Examples/_category_.json b/docs/APIs-and-SDKs/Web-Console-API/Examples/_category_.json new file mode 100644 index 00000000..96fb6789 --- /dev/null +++ b/docs/APIs-and-SDKs/Web-Console-API/Examples/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 50, + "collapsible": true, + "collapsed": true, + "label": "Custom Integration Examples" +} diff --git a/docs/Web-Console-API/_category_.json b/docs/APIs-and-SDKs/Web-Console-API/_category_.json similarity index 68% rename from docs/Web-Console-API/_category_.json rename to docs/APIs-and-SDKs/Web-Console-API/_category_.json index a8ba2189..1068b1a4 100644 --- a/docs/Web-Console-API/_category_.json +++ b/docs/APIs-and-SDKs/Web-Console-API/_category_.json @@ -2,5 +2,5 @@ "position": 4, "collapsed": true, "collapsible": true, - "label": "Web Console API" + "label": "Platform API" } \ No newline at end of file diff --git a/docs/APIs-and-SDKs/_category_.json b/docs/APIs-and-SDKs/_category_.json new file mode 100644 index 00000000..5f1e1a37 --- /dev/null +++ b/docs/APIs-and-SDKs/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "collapsible": true, + "collapsed": true, + "label": "APIs & SDKs" +} diff --git a/docs/APIs-and-SDKs/overview.mdx b/docs/APIs-and-SDKs/overview.mdx new file mode 100644 index 00000000..dd0602ff --- /dev/null +++ b/docs/APIs-and-SDKs/overview.mdx @@ -0,0 +1,18 @@ +--- +sidebar_position: 0 +--- + +# Overview + +The APIs & SDKs section provides everything you need to integrate ABsmartly into your product. +This is the technical foundation that powers experiment assignment, event tracking, feature flags and consistent user experiences across all platforms.
+ +Here you will find: +- SDK documentation for client side, server side and mobile environments +- API references for the ABsmartly platform and the SDKs +- implementation guides, examples and best practices +- details on exposure handling, goal tracking and variant assignment + +If you are an engineer integrating ABsmartly or maintaining experiment infrastructure, this section is for you. +It explains how the SDKs work, how to use the API programmatically and how to build a reliable experimentation setup that fits your architecture. + diff --git a/docs/Onboarding/Azure SAML Setup.mdx b/docs/Third-party-integrations/SSO/Azure SAML Setup.mdx similarity index 98% rename from docs/Onboarding/Azure SAML Setup.mdx rename to docs/Third-party-integrations/SSO/Azure SAML Setup.mdx index a1baaede..dd108a1b 100644 --- a/docs/Onboarding/Azure SAML Setup.mdx +++ b/docs/Third-party-integrations/SSO/Azure SAML Setup.mdx @@ -1,4 +1,4 @@ -import Image from '../../src/components/Image'; +import Image from '../../../src/components/Image'; # SAML SSO - Azure diff --git a/docs/Onboarding/Google SAML Setup.mdx b/docs/Third-party-integrations/SSO/Google SAML Setup.mdx similarity index 97% rename from docs/Onboarding/Google SAML Setup.mdx rename to docs/Third-party-integrations/SSO/Google SAML Setup.mdx index 763bb475..f8baa441 100644 --- a/docs/Onboarding/Google SAML Setup.mdx +++ b/docs/Third-party-integrations/SSO/Google SAML Setup.mdx @@ -1,4 +1,4 @@ -import Image from '../../src/components/Image'; +import Image from '../../../src/components/Image'; # SAML SSO - Google Workspace diff --git a/docs/Onboarding/Okta SAML Setup.mdx b/docs/Third-party-integrations/SSO/Okta SAML Setup.mdx similarity index 98% rename from docs/Onboarding/Okta SAML Setup.mdx rename to docs/Third-party-integrations/SSO/Okta SAML Setup.mdx index b23482ae..6a4744f1 100644 --- a/docs/Onboarding/Okta SAML Setup.mdx +++ b/docs/Third-party-integrations/SSO/Okta SAML Setup.mdx @@ -1,4 +1,4 @@ -import Image from 
'../../src/components/Image'; +import Image from '../../../src/components/Image'; # SAML SSO - Okta diff --git a/docs/Third-party-integrations/SSO/_category_.json b/docs/Third-party-integrations/SSO/_category_.json new file mode 100644 index 00000000..4743a986 --- /dev/null +++ b/docs/Third-party-integrations/SSO/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 200, + "collapsed": true, + "collapsible": true, + "label": "SSO" +} \ No newline at end of file diff --git a/docs/Third-party-integrations/Segment-Integration/index.mdx b/docs/Third-party-integrations/Segment-Integration/index.mdx index 53ce3923..dc6a74e0 100644 --- a/docs/Third-party-integrations/Segment-Integration/index.mdx +++ b/docs/Third-party-integrations/Segment-Integration/index.mdx @@ -104,7 +104,7 @@ It can be useful to send experiment exposures to Segment for visibility from oth By default, the _Track Calls_ mapping will filter and not send any events with the name `Experiment Viewed` to ABsmartly. ::: -You can [install a custom event logger](/docs/SDK-Documentation/getting-started#using-a-custom-event-logger) in ABsmartly and send exposures directly to Segment. +You can [install a custom event logger](/docs/APIs-and-SDKs/SDK-Documentation/getting-started#using-a-custom-event-logger) in ABsmartly and send exposures directly to Segment. {JsSendingExposuresToSegment} @@ -256,7 +256,7 @@ To migrate from the classic ABsmartly destination to ABsmartly (Actions), discon for this purpose. In the ABsmartly context, we can - [install a custom event logger](https://docs.absmartly.com/docs/sdk-documentation/getting-started/#using-a-custom-event-logger) + [install a custom event logger](https://docs.absmartly.com/docs/APIs-and-SDKs/SDK-Documentation/getting-started/#using-a-custom-event-logger) and send exposures directly to Segment. 
{JsSendingExposuresToSegment} diff --git a/docs/Third-party-integrations/Zuko-Integration.mdx b/docs/Third-party-integrations/Zuko-Integration.mdx index 8e2ea129..c9c519a7 100644 --- a/docs/Third-party-integrations/Zuko-Integration.mdx +++ b/docs/Third-party-integrations/Zuko-Integration.mdx @@ -1,3 +1,7 @@ +--- +sidebar_position: 3 +--- + import Image from "../../src/components/Image"; # Zuko Integration @@ -15,7 +19,7 @@ Zuko to gain deeper insights into how your experiments affect your forms. ## Prerequisites 1. **ABsmartly Implementation:** Ensure ABsmartly is correctly implemented on your page. Refer - to the [SDK documentation](/docs/SDK-Documentation/getting-started) for more information. + to the [SDK documentation](/docs/APIs-and-SDKs/SDK-Documentation/getting-started) for more information. 2. **Zuko Account and Tracking:** [Create a Zuko account](https://app.zuko.io/signup) (if you don't have one) and ensure that the tracking is successfully added to your form. Refer to the [Zuko Installation Guides](https://www.zuko.io/guides) for more information. @@ -61,7 +65,7 @@ attributes into `window.zuko.attributes`. See [Zuko's documentation on setting c 1. **Run your experiment:** Make sure your experiment is running on ABsmartly and users are being assigned to different variants. 2. **Submit test forms:** Fill out the forms on your page while participating in the test. You can do this for - different variants using the ABsmartly SDKs' [override functionalities](/docs/SDK-Documentation/basic-usage#overriding-treatment-variants). + different variants using the ABsmartly SDKs' [override functionalities](/docs/APIs-and-SDKs/SDK-Documentation/basic-usage#overriding-treatment-variants). 3. **Check Zuko:** Go to your Zuko dashboard and look for the custom attributes you configured in the filters dropdown. You should see attributes like `exp_YourExperimentName` with values corresponding to the assigned variant for each form submission. 
It might take a few minutes for the data to appear in Zuko. diff --git a/docs/Third-party-integrations/_category_.json b/docs/Third-party-integrations/_category_.json index 68bfa4f5..f85d35e7 100644 --- a/docs/Third-party-integrations/_category_.json +++ b/docs/Third-party-integrations/_category_.json @@ -1,7 +1,7 @@ { - "position": 6, + "position": 3, "collapsible": true, "collapsed": true, - "label": "Third-party integrations" + "label": "Integrations" } \ No newline at end of file diff --git a/docs/get-started.mdx b/docs/get-started.mdx new file mode 100644 index 00000000..9ff4d637 --- /dev/null +++ b/docs/get-started.mdx @@ -0,0 +1,72 @@ +--- +sidebar_position: 0 +--- + +# Get Started + +Welcome to the ABsmartly Documentation. + +ABsmartly is an advanced experimentation and feature flagging platform designed to help teams validate ideas quickly, measure impact reliably and roll out changes safely. The platform provides everything you need to design experiments, expose users consistently, +measure impact with robust metrics and make evidence based product decisions. + +This documentation will guide you through how to use the platform, integrate it into your product and understand the core concepts behind trustworthy experimentation. + +--- + +## Documentation structure + +The documentation is divided into a few main sections to help you find what you need easily. + +### Product Documentation +This [section](./web-console-docs/overview) explains how to use the ABsmartly Web Console to run experiments and feature flags.
+ +You will find guides on: +- [creating](./web-console-docs/experiments/creating-an-experiment) and managing experiments +- defining [goals](./web-console-docs/goals-and-metrics/goals/overview) and [metrics](./web-console-docs/goals-and-metrics/metrics/overview) +- using [feature flags](./web-console-docs/feature-flags/creating-a-feature) +- tracking [events](./web-console-docs/events/the-events-page) +- managing [users, teams and permissions](./web-console-docs/users-teams-permissions/teams) +- configuring workspaces and settings +- using the [LaunchPad browser extension](./web-console-docs/launchpad-browser-extension/getting-started) + +If you use ABsmartly day to day to run experiments or feature flags, this is where you will spend most of your time. + +--- + +### APIs & SDKs + +This [section](./APIs-and-SDKs/overview) contains all technical resources for implementing ABsmartly in your product. + +You will find: +- [SDK documentation](./APIs-and-SDKs/sdk-documentation/) for client side, server side and mobile +- [SDK API](./APIs-and-SDKs/SDK-API/absmartly-collector-api) references and examples +- The full platform [API reference](./APIs-and-SDKs/SDK-API/absmartly-collector-api) for programmatic access + +Developers and engineers will spend most of their time here. + +--- + +### Integrations +Using the platform API, ABsmartly can integrate with any tools you already use for analytics, communication and authentication. + +This section includes some sample integrations: +- [Slack integration](./Third-party-integrations/Slack-Integration) +- [Segment integration](./Third-party-integrations/Segment-Integration/) +- [Zuko integration](./Third-party-integrations/Zuko-Integration) +- [SSO setup](./Third-party-integrations/SSO/Azure%20SAML%20Setup) + +Each guide explains how to connect ABsmartly with your existing systems. + +--- + + +## What to do next + +If you are new to ABsmartly, we recommend starting with: +1. **Product Documentation → Getting Started** +2.
**APIs & SDKs** (choose the SDK you need to integrate first) +3. **Experiments** to learn how to create, run and analyse your first test + +You can explore the rest of the documentation as you become more familiar with the platform. + +Welcome to ABsmartly. \ No newline at end of file diff --git a/docs/glossary.mdx b/docs/glossary.mdx new file mode 100644 index 00000000..3c5d6672 --- /dev/null +++ b/docs/glossary.mdx @@ -0,0 +1,755 @@ +--- +sidebar_position: 10 +--- + +# Glossary + +This glossary explains the key concepts used throughout ABsmartly and in product experimentation more broadly. +It is designed as a quick reference for anyone designing, running or analysing experiments. + +--- + +## A + +### A/A experiment +A special type of A/B experiment where users are randomly split between two *identical* variants. +The goal is not to test a product change, but to validate the experimentation setup itself. + +A/A experiments help validate tracking and detect issues such as sample ratio mismatch (SRM), tracking bugs or unexpected bias in randomisation before you start testing real changes. + +**Example:** Splitting traffic 50/50 between two identical versions of the homepage to verify that traffic allocation, event tracking and metrics behave as expected. + +### A/B experiment +A controlled experiment that compares a baseline experience (control, variant A) to a single alternative (treatment, variant B) to estimate the impact of a change. +This is the most common type of experiment setup and the default when creating a new experiment using ABsmartly. + +A/B experiments are the core building block of product experimentation and allow teams to quantify how a change affects key metrics. + +**Example:** Testing a new “Buy now” button design (B) against the current design (A) and measuring the change in purchase conversion rate. 
+ +### A/B/n experiment +A special type of A/B experiment that compares a control to *multiple* treatments at the same time (A vs B vs C, etc.). +A/B/n experiments are also sometimes referred to as multi-variant. Not to be confused with [Multivariate Experiment](#multivariate-experiment). + +A/B/n experiments speed up exploration when several ideas are available, but they increase the number of comparisons and therefore require more traffic. + +**Example:** Testing three alternative product page layouts (B, C, D) against the current layout (A) to find the best-performing design. + +### Audience targeting +The practice of restricting an experiment to a specific subset of visitors based on attributes, behaviour or context. + +Targeting ensures that experiments are run on the right population (for example, new users only), but aggressive targeting can reduce sample size and affect generalisability. + +**Example:** Running an experiment only for traffic from a specific country or only for logged-in customers. + +--- + +## B + +### Baseline +The baseline (or baseline value) is the current performance of your metric, usually measured in AA experiment or a previous AB test. + +It represents the starting point against which you compare your Treatment. + +### Binomial +A statistical model describing outcomes that have exactly two discrete values such as 0 / 1, true / false, success / failure or convert / not convert. + +Many core metrics in experimentation, like conversion rate, follow a binomial process and use binomial-based methods for confidence intervals and tests. + +**Example:** Whether each visitor completed checkout (yes or no) in a cart conversion experiment. + +### Behavioral metrics +A metric that captures what users *do* in the product rather than what the business earns from them. + +Behavioural metrics such as clicks, scroll depth or page views are often more sensitive and can explain *why* a [business metric](#business-metrics) moves. 
They can also help identify potential [false positives](#false-positive) when the effect on the user behaviour does not match the observed effect on the [primary metric](#primary-metric).
ABsmartly recommends using behavioural metrics as [secondary metrics](#secondary-metrics) to help support the decision and reduce the risk of false positives on the primary metric.
+ +**Example:** If a test result shows a +2.3% lift with a 95% confidence interval of [+0.5%, +4.1%], +it means that if you were to repeat the same experiment 100 times, the true effect would lie within the CI in about 95 of those. + +You could say “We are 95% confident that the [true effect](#true-effect) of the treatment is between +0.5% and +4.1%.” + +### Confirmation bias +The tendency to focus on data that supports pre-existing beliefs and ignore or downplay the evidence. + +Confirmation bias can lead teams to cherry-pick metrics or time windows that “prove” a desired outcome. +To avoid such bias, ABsmartly recommends pre-registering the [decision criteria](#decision-criteria) before the experiment runs. + +**Example:** Highlighting only secondary metrics that moved in the expected direction and ignoring a neutral or negative primary metric. + +### Confidence level +The probability that the [confidence interval](#confidence-interval) procedure will capture the true value, across many hypothetical repetitions of the experiment. + +Common choices such as 90, 95 percent or 99 percent define how strict you are about uncertainty and directly relate to the [significance level](#significance-level). +A higher confidence level reduces the risk of [false positive](#false-positive) but requires more data (wider intervals). + +### Continuous metric +A numeric metric that can take many possible values on a range, not just discrete categories. + +Continuous metrics such as revenue or session duration often carry richer information but can be skewed and require outlier handling. + +**Example:** Average order value, time on page, or number of items in a basket. + +### Continuous learning +A way of working where teams regularly run experiments, use insights to refine their hypotheses and feed results back into discovery and design. + +Continuous learning turns experimentation into a long-term advantage instead of one-off tests. 
+ +**Example:** Iteratively testing onboarding flows, using each result to inform the next design. + +### Continuous delivery +A software practice that keeps code in a releasable state so that changes can be deployed frequently and safely. + +Continuous delivery and experimentation complement each other: experiments de-risk changes, and frequent releases make it easier to act on experiment outcomes. + +**Example:** Automatically deploying small, tested increments behind feature flags several times per day. + +### CUPED (Controlled Experiments Using Pre-Experiment Data) +A variance reduction technique that adjusts experiment metrics using correlated pre-experiment data as a covariate. + +CUPED can significantly improve sensitivity so experiments reach conclusions faster or detect smaller effects using the same traffic. + +**Example:** Using each user’s historical spend as a baseline when analysing purchase revenue during the test. + +--- + +## D + +### Decision criteria + +The predefined rules or thresholds used to determine the outcome of an experiment—whether to ship, iterate, or discard a treatment based on its impact on key metrics. + +ABsmartly recommends pre-registering decisions criteria before the start of the experiment. + +**Example:** “Ship if we see the expected impact on the primary metric and secondary metrics and no guardrail metrics regress.” + +--- + +## E + +### Effect + +See [Observed effect](#observed-effect) + + +### Effect size + +A standardized measure of the magnitude of the effect, often expressed in absolute or relative terms. +It helps quantify how big the effect is, independent of sample size. + +Effect size is essential for determining whether a result is not just statistically significant, but also practically meaningful. +It's also used in power calculations when designing experiments. + +**Example:** If the treatment increases conversions from 5.0% to 5.5%, that’s a relative effect size of +10% (0.5 / 5.0). 
+ +### Efficacy boundary +A statistical threshold used in [Group Sequential Testing](#group-sequential-testing) that, if crossed during an interim analysis, +allows the experiment to **stop early for success** — indicating that the treatment effect is large enough to be declared statistically significant before the full sample is collected. + +Efficacy boundaries improve agility by enabling early decisions, saving time and resources when strong evidence emerges. +However, they must be pre-defined and corrected to control the overall Type I error (false positive rate) across multiple looks at the data. + +### Experiment interaction +A situation where the effect of one experiment depends on whether another experiment is also running for the same users. +While most experiment interactions do not have an impact on the outcome of the experiments, +some strong interactions can distort results and make it hard to attribute observed effects to a single change. + +ABsmartly will alert users when interaction between 2 running experiments are detected. + +**Example:** A new search ranking algorithm combined with a new layout that changes click patterns in unexpected ways. + +### Experimentation power +The probability that an experiment will correctly detect a true effect when the treatment actually has a real impact. + +Power reflects the experiment's ability to avoid [false negatives](#false-negative) (Type II errors). +A common industry standard is 80% power, meaning there's a 20% chance the test will miss a real effect. + +Low-powered tests risk overlooking meaningful changes or underestimating effect sizes which leads to unreliable decisions. + +ABsmartly considers an experiment to be completed only once it achieved sufficient power (its [sample size](#sample-size) if large enough). 
+ +**Example:** If you design an A/B test with 80% power to detect a 2% lift in conversions, you have an 80% chance of seeing a statistically significant result if the treatment truly improves conversions by 2% or more. + +### Experiment replication +Running the same experiment again to confirm that a previous result was not due to chance. +Replication increases confidence that the observed effect is real and not a fluke caused by noise, novelty effects, or local conditions. + +Replication strengthens trust in results, especially for experiments with borderline significance, surprising outcomes, or high business impact. +It also helps filter out false positives, which are common when the overall success rate is low. + +You might skip replication for low-risk UI changes, but re-running a test is advisable when the result will drive a strategic roadmap shift or if the experiment is highly visible across the organization. + +### Exploratory vs confirmatory experiments +Exploratory experiments are used to search for patterns or promising directions; confirmatory experiments are designed to rigorously test a specific hypothesis. +Mixing the two modes can lead to inflated false positives; exploratory insights should ideally be confirmed with a follow-up confirmatory test. + +**Example:** Trying several onboarding variants to see what seems promising (exploratory) then running a focused A/B test on the chosen design (confirmatory). + +--- + +## F + +### False discovery +A false discovery occurs when an experiment shows a statistically significant result, +but there is no real effect — meaning the “win” is actually due to random noise, not the treatment. + +**False discoveries are a normal part of experimentation**. When you run many A/B tests with a given significance level — some “wins” will occur by chance. +Accepting this is part of working in a probabilistic system. 
+ +However, blindly acting on false discoveries can waste resources, mislead strategy, or harm user experience. +That's why understanding — and managing — the risk of false discoveries is essential. + +A few things to consider to reduce the risks of False Discoveries + +- Ground hypotheses in user research, behaviour, prior data, or product theory. Avoid “spaghetti testing” — randomly trying ideas just to see what sticks. +- Retest or replicate high-impact or surprising results before launching. This adds confidence and filters out false positives. +- Use falsifiable hypotheses and define what success and failure look like before running the experiment. + +**Example:** You run 100 experiments with a significance level of 0.05. Even if none of the treatments actually work, about 5 will show false “wins” by chance. +These are false discoveries — not because of bad math, but because that’s how probability works. + +### False discovery rate (FDR) +The proportion of all statistically significant results that are actually false positives across a group of experiments. + +FDR is a portfolio-level metric: it tells you how many of your “wins” are likely to be wrong. +This matters when you run many experiments, especially if you don’t adjust for multiple comparisons or if your overall success rate is low. + +See also [False positive risk](#false-positive-risk) + +**Example:** If your team ran 500 A/B tests and 100 were statistically significant, but only 60 of those have an actual true effects, then your False Discovery Rate is 40% — meaning 40 out of 100 wins are likely false. + +### False negative +Failing to detect a real effect when it exists; equivalent to a Type II error. +False negatives cause missed opportunities where genuinely beneficial changes are discarded. + +**Example:** Abandoning a feature improvement that would have increased conversion by 1 percent because the test was underpowered. 
+ +### False positive +Concluding that an effect exists when, in reality, there is none; equivalent to a Type I error. +False positives lead to rolling out changes that do not help and may even hurt the business. + +**Example:** Launching a redesign because the experiment happened to show a spurious uplift. + +### False positive risk +The probability that a statistically significant result is actually a false positive — in other words, the chance that the null hypothesis is still true, despite rejecting it. + +This is a per-result interpretation of significance, helping you assess whether an individual “win” is trustworthy. +False Positive Risk depends not just on the p-value or alpha level, but also on power and the prior probability that the treatment is effective (i.e. the base success rate in your organization). + +See also [False discovery rate](#false-discovery-rate-fdr) + +**Example:** +If your team runs an experiment with α = 0.05 and the prior success rate is 10%, then a significant result with p < 0.05 could still have a 22%–38% chance of being false — much higher than the 5% most people assume. + +### Feature flag +A control mechanism that lets you turn a feature on or off, or vary it across users, without redeploying code. +Feature flags make it easier to run experiments, carry out gradual rollouts and quickly roll back problematic changes. + +**Example:** Enabling a new checkout flow only for 10 percent of traffic via a flag while monitoring guardrail metrics. + +### Fishing +Searching through many metrics, segments or time windows without predefined hypotheses until something appears significant. +Fishing inflates the chance of false positives and can produce misleading “insights” that do not replicate. + +To prevent fishing, it is recommended to pre-register decision criteria before the experiment starts. + +**Example:** Testing dozens of segment combinations after the fact and reporting only the one combination that shows a significant effect. 
+ +### Fixed horizon testing +A testing approach where sample size or duration is specified in advance and data is formally analysed only once, at the end. +Fixed horizon methods are conceptually simple but are not robust to unplanned peeking or early stopping. + +See also [Group Sequential Testing](#group-sequential-testing). + +**Example:** Committing to run an experiment for exactly two weeks and making a decision only after both weeks have completed. + +### Fully sequential testing (mSPRT) +A testing framework that allows continuous monitoring and stopping at any time while maintaining valid error guarantees, often based on sequential probability ratio tests. +Fully sequential methods offer maximum flexibility in when to stop, at the cost of experimentation power and more complex design and interpretation. + +See also [Group Sequential Testing](#group-sequential-testing). + +### Futility boundary +A statistical threshold used [Group Sequential Testing](#group-sequential-testing) that, if crossed during an interim analysis, +allows the experiment to stop early for lack of effect — indicating that the treatment is unlikely to produce a meaningful or statistically significant improvement, +even if the test continues to full sample size. + +Futility boundaries improve efficiency by preventing wasted time and traffic on experiments that show little promise. +They help teams focus on higher-impact ideas, but must be pre-defined and adjusted to avoid inflating the Type II error (false negatives) across multiple analyses. + +### Futility type +The rule or criterion used to define what constitutes “futility” during interim analyses in [Group Sequential Testing](#group-sequential-testing). +It determines whether an experiment should stop early because it is unlikely to lead to a statistically significant result or if it should continue running. 
+ +There are two common futility types: + +**Non-binding futility**: You may stop the test if the boundary is crossed, but you’re not required to. It doesn’t affect the final significance level if you continue. + +**Binding futility**: If the futility boundary is crossed, the test must stop. Ignoring it would invalidate the final p-value, potentially inflating Type I error. + +Choosing a futility type affects both statistical validity and decision flexibility. +Non-binding futility provides optionality for business judgment, while binding futility enforces stricter control over error rates. + +By default, GST experiment in ABsmartly uses a binding futility type but this can be changed during the setup. + +**Example:** At the halfway point of an A/B test, the test crosses the futility boundary, +but the team decides to continue because external factors suggest the impact may emerge later — a valid choice under the non-binding rule. + +--- + +## G + +### Group sequential testing +A sequential approach where you predefine interim analyses (checkpoints) at which you are allowed to analyse data and possibly stop early. +Group sequential designs balance flexibility with simplicity and are well suited to practical experimentation where a few well-timed looks are enough. + +Group Sequential Testing is the default method when creating new experiment. +It leads to making decisions up to 80% faster than with a more traditional [Fixed Horizon Experiment](#fixed-horizon-testing). + +Do you want to know more about Group sequential testing? Read our [dedicated GST article](https://absmartly.com/gst) + +### Guardrail metrics +Metrics monitored to ensure experiments stay within acceptable safety or performance constraints on some key KPIs, independent of the impact observed on the primary or secondary metrics. +Guardrails protect user experience and business health while teams try bold ideas. 
+ +The best practice is for all experimenting teams within a product area to agree on a set of guardrail metrics to monitor for all experiments. +That way all decisions are made with the same level of confidence on the potential impact on some key KPIs. + +**Example:** Monitoring error rate and page load time while testing a new recommendation algorithm. + +--- + +## H + +### Hold-out group +A subset of visitors deliberately excluded from a feature rollout or an experiment and kept on the old experience for comparison. +Hold-outs help measure long-term or background effects, and can act as a control for rolling experiments or feature flags. + +**Example:** Keeping 5 percent of visitors on the previous pricing model to track long-term revenue impact. + + +### Hypothesis + +A specific, testable prediction about the outcome of an experiment usually describing how a change (treatment) is expected to affect a key metric. + +A well-formed hypothesis helps ensure that tests are intentional and interpretable, not just random trial-and-error ("spaghetti testing"). +It provides a clear basis for decision-making, learning, and iteration. + +**Example:** “Reducing the number of form fields on the checkout page will increase conversion rate by at least 2%.” + +A good hypothesis is falsifiable (can be proven wrong), linked to user behavior or product theory, and often includes an expected direction and magnitude of effect. + + +### Hypothesis testing +A statistical framework used to evaluate whether observed differences in an experiment are likely due to chance or reflect a real effect. + +Hypothesis testing helps teams make data-driven decisions by providing a structured way to accept or reject the null hypothesis. +It's the foundation for calculating p-values, confidence intervals, and determining statistical significance. 
+ +**Example:** In an A/B test comparing two landing pages, hypothesis testing is used to assess whether the observed +3.2% lift in conversion rate is statistically significant, +or just a result of random variation. + +--- + +## I + +### Impact estimate +An estimate of the observed performance of a treatment variant compared to the control, +typically calculated as a relative increase or decrease in the metric. + +The relative impact tells you how much better or worse the treatment performed relative to the baseline. + +### Interaction effect +A situation where the combined effect of two variables differs from the sum of their individual effects. +Interaction effects can explain why a change works well in one context but not another. + +ABsmartly automatically warn experimenters of possible interactions between 2 or more experiments. + +**Example:** A new layout increases conversion for mobile users but decreases it for desktop users, altering the overall result. + +### Lower bound estimate +The lower end of a confidence interval, often used as a conservative estimate of effect size. +Reporting lower bounds can give decision makers a “worst plausible improvement” and reduce over-optimism. + +**Example:** A lift of 5% with a 95 percent interval from 1% to 9% has a 1% lower bound. + +--- + +## M + +### MDE (Minimum detectable effect) +The smallest effect size that an experiment is designed to detect with the chosen power and significance level. +MDE connects business expectations with statistical design; too small and tests become expensive, too large and you miss meaningful improvements. + +**Example:** Planning a test to detect at least a 2% relative increase in checkout conversion. + +### Mean +The arithmetic average of a set of values. +Many metrics are reported as means, such as revenue per user, and assumptions about distributions often centre on the mean. + +**Example:** Total revenue of 10,000 across 200 users gives a mean of 50 per user. 
+ +### Metric sensitivity +It refers to how responsive a metric is to real changes in user behavior and how easy it is to detect those changes statistically. + +A sensitive metric will show a statistically significant effect even for small real improvements. +An insensitive metric will require large changes (or large sample sizes) to detect a significant effect. + +Metric sensitivity = how likely a metric is to detect true effects. + +It depends on: +- [Effect size](#effect-size) — how much the treatment actually impacts the metric +- [Variance](#variance) — how noisy or stable the metric is +- [Sample size](#sample-size) — how much data you collect +- [Baseline value](#baseline) — some metrics behave differently at different scales + +### Metric variance +Metric variance refers to the amount of variability or spread in the values of a metric across visitors. +In A/B testing, high variance means the metric fluctuates widely from visitor to visitor, while low variance means it remains relatively stable. + +High variance makes it harder to detect real effects, requiring larger sample sizes or longer test durations to reach statistical significance. +Low variance metrics are generally more sensitive and more efficient for testing. + +High variance issues can be mitigated using techniques like [CUPED](#cuped-controlled-experiments-using-pre-experiment-data) or by managing [outliers](#outliers). + +**Example:** +High variance metric: +Revenue per user — some users spend a lot, most spend nothing. This distribution is heavily skewed, leading to large variance. + +Low variance metric: +Click-through rate (CTR) on a button — most users either click or don’t, and the values are bounded (0 or 1), resulting in low variance. + + +### Multivariate experiment +An experiment that tests multiple elements of a page or experience at the same time by combining different versions of each element into many variant combinations. 
+Instead of only comparing A vs B, a multivariate test evaluates how several changes and their interactions affect the outcome. +Not to be confused with [Multi-variant experiment](#multi-variant-experiment). + +Multivariate experiments help you understand not just *whether* a change works, but *which combination* of changes works best. +They are useful when you want to optimise several components together, such as headline, image and call to action. +However, they require significantly more traffic than a simple A/B test, because traffic must be spread across many variant combinations and the analysis is more complex. +For this reason Multivariate experiments are not supported in ABsmartly. + +**Example:** +You want to optimise a landing page with the following: +- 2 different headlines (H1, H2) +- 3 different hero images (I1, I2, I3) +- 2 different call-to-action buttons (C1, C2) + +A multivariate experiment would test all 2 × 3 × 2 = 12 combinations (for example H1–I2–C1, H2–I3–C2, and so on) and estimate which combination yields the highest conversion rate, as well as whether certain headlines work better only with specific images or buttons. + +### Multi-variant experiment +See [A/B/n experiment](#abn-experiment). +Not to be confused with [Multivariate experiment](#multivariate-experiment). + + +--- + +## N + +### Null hypothesis +A formal assumption in statistical testing that there is no true effect or difference between the treatment and control groups. +It represents the default position that any observed difference is due to random chance. + +The null hypothesis is the foundation of significance testing. +In A/B testing, you aim to collect enough evidence to reject the null hypothesis and conclude that the treatment likely has a real effect. +If you fail to reject it, you assume the data is consistent with no meaningful difference. + +**Example:** +You run an A/B test on a checkout button. 
+ +Null hypothesis (H₀): The new button (Variant B) has the same conversion rate as the original (Variant A). + +If your p-value is below your chosen threshold (e.g. α = 0.05), you reject H₀ and infer that Variant B likely has an effect. + +**Important Notes:** +- Rejecting the null does not prove the treatment is better — only that the observed data is unlikely if there were no effect. +- Failing to reject H₀ does not prove the variants are the same — just that there's not enough evidence to conclude a difference. + +--- + +## O + +### Observed effect +The measured difference between treatment and control groups in an experiment. It represents the impact of the change being tested. +The observed effect is the best point estimate available from the data, but it is subject to sampling variability. + +**Example:** Treatment shows a 5.1 percent conversion rate, control 4.8 percent, so the observed effect is 0.3 percentage points. + +### One-tailed analysis +A statistical test that checks for an effect in only one direction. Either whether the treatment is better than the control, or worse than, but not both. +It does not test for two-way differences. + +One-tailed tests offer greater statistical power than [two-tailed](#two-tailed-analysis) tests, meaning they can detect effects with smaller sample sizes but only when you care about a change in one direction. + +When it's appropriate: +One-tailed analysis makes sense if: +- You're only interested in detecting improvement +- You would make the same decision (ie not ship) if the result is neutral or negative. + +This applies in “ship vs. no-ship” scenarios, where you only want to ship if the variant is better, and don't need to detect harm because you wouldn’t ship it anyway. + +**Example:** You test a new pricing design. If it improves revenue, you’ll ship it. If it’s flat or worse, you won’t — so a one-tailed test for improvement is appropriate. 
+ +### Operational metric +A metric that reflects system health or performance rather than direct user or business outcomes. +While some experiments might be targeting them, operational metrics often act as guardrails and basic safety checks during experiments. + +ABsmartly recommends using some key operational metrics as guardrail metrics to ensure the experimentation program does not hurt those key KPIs. + +**Example:** Error rate, latency, CPU utilisation or cache hit rate. + +### Outliers +Outliers are data points that are significantly higher or lower than the rest of the data. +In experimentation, they often represent extreme user behavior (e.g., unusually large purchases or anomalous session lengths) and can disproportionately affect averages and [variances](#metric-variance). + +Outliers can inflate variance, distort means, and reduce test sensitivity, especially for metrics like revenue or engagement that are naturally skewed. +Even a few extreme values can lead to misleading results, particularly in small or medium-sized experiments. + +**Example:** In a test measuring revenue per user, most users spend $0–$50, but one user spends $5,000. +This outlier can shift the average upward, making the treatment look better than it really is. + +**Risks associated with outliers:** +- Loss of signal: Outliers are real users. Trimming them can hide important effects or exclude some key user segment. +- Lack of transparency: Unclear or inconsistent handling of outliers can erode trust in experimentation results. + +--- + +## P + +### Peeking +Peeking refers to looking at experiment results before the test is completed, especially to check for statistical significance and +making decisions based on those early results without proper statistical adjustments. + +Peeking inflates the [false positive rate](##false-discovery-rate-fdr), making it more likely that you'll incorrectly conclude a treatment is effective when it’s not. 
+This happens because repeatedly checking increases the chance that random noise appears significant at least once. + +**Example**: +You run an A/B test designed for 100,000 users, but check results every day. +On day 6, with only 40% of the data collected, you decide to stop early and ship because you see some promising results. +This is peeking and you may be acting on a false discovery. + +### Power level + +See [Experimentation power](#experimentation-power). + +### Power calculation +The process of choosing sample size, MDE, significance level and power so that an experiment is appropriately designed. +Good power calculations align statistical design with practical constraints like traffic, time and business priorities. + +Properly powering an experiment is a requirement for making good reliable data-informed decisions. + +ABsmartly's built-in power calculator makes it easy to design your experiment correctly. + +**Example:** Deciding that you need 50,000 visitors per variant to detect a 1% increase with 80 percent power at a 5 percent significance level. + +### Pre-selection bias +Bias introduced when the users who enter a study are not representative of the broader population or when assignment is not properly random. +Pre-selection bias can make experiment results look better or worse than they will be in real-world rollout. + +**Example:** Testing a new feature only on highly engaged users and then rolling it out to everyone. + +### Primary metric +The main metric used to judge success or failure of an experiment. +Primary metrics should be chosen carefully in advance to reflect the experiment’s objective; they drive decisions. + +When creating an experiment in ABsmartly, users must choose a single primary metric. This will be the main decision-making metric. +It is usually good practice to choose a business metric as the primary metric. + +**Example:** Checkout conversion rate for an experiment on the payment page.
+ +### Product experimentation +The use of controlled experiments to evaluate product changes and make product decisions grounded in evidence. +Product experimentation turns hypotheses about user behaviour into measurable tests and supports continuous improvement. + +**Example:** Testing new onboarding journeys, pricing presentations or recommendations. + +### Product operating model +A framework that describes how product teams discover opportunities, deliver solutions and use experimentation and data as part of their regular workflow. +A coherent operating model ensures experimentation is not a one-off activity but a core part of how the organisation builds products. + +### P-value +The probability of observing data at least as extreme as what you saw, assuming the null hypothesis is true. +P-values are widely used but easily misinterpreted; they are not the probability that the null is true. + +**Example:** A p-value of 0.03 indicates that, if there were no true effect, you would see a result this extreme or more in about 3 percent of repeated tests. + +### P-hacking +Manipulating analysis choices, data cuts or stopping rules until a desired level of significance is achieved. +P-hacking severely inflates false positives and creates misleading “evidence”. + +**Example:** Trying different subsets of users and time windows until one yields p < 0.05, then reporting only that result. + +--- + +## S + +### Sample size +The number of visitors included in an experiment. +Sample size, together with [variance](#variance) and [effect size](#effect-size), determines [power](#experimentation-power) and the time needed to reach a conclusion. + +### Secondary metrics +Additional metrics tracked in an experiment to understand side effects or support interpretation of the primary metric. +Secondary metrics reveal trade-offs and help explain why a primary metric changed.
Secondary metrics can also be used to reduce the risk of false positives on the primary metric. + +**Example:** Monitoring average order value and gross conversions while the primary metric is net conversion rate. + +### Significance level (alpha) +The maximum acceptable probability of a [Type I error](#type-i-error) that you are willing to tolerate in a single hypothesis test. +It determines the threshold at which you consider a result statistically significant. + +**Example:** If you set α = 0.05, and your p-value is below 0.05, you declare the test result statistically significant — i.e., there's enough evidence to reject the null hypothesis. + +### Spillover effect +When the impact of a change spills over from users in one variant to users in another, breaking the independence assumption. +Spillover can bias results and is especially relevant for social features, shared environments or marketplaces. + +**Example:** Discounts shown only to the treatment group affecting reference prices for control users. + +### SRM (sample ratio mismatch) +A discrepancy between the expected allocation of users across variants and what is actually observed. +SRM is a strong signal that something is wrong with the implementation of the test. +Experiment results should not be trusted until the cause is understood. + +ABsmartly automatically checks for SRM and reports any issue to the experimenters. + +**Example:** Configuring a 50/50 split but observing 60 percent of traffic in control and 40 percent in treatment. + +### Standard deviation +A measure of how spread out or variable your data is. It tells you, on average, how far each data point is from the mean (average). +Standard deviation is central to many formulas for [confidence intervals](#confidence-interval), [z-scores](#z-score) and sample size calculations. + +- If your data points are close to the mean, the standard deviation is small. +- If your data points are widely spread out, the standard deviation is large.
+ +**Example:** +Imagine two sets of A/B test results for daily revenue (in dollars): +Group A: [100, 102, 98, 101, 99] +Mean = 100, Standard deviation ≈ 1.58 → very stable + +Group B: [80, 120, 70, 130, 100] +Mean = 100, Standard deviation ≈ 23.45 → much more variation + +Same mean, very different variability! + +### Statistical power +The probability that a test will detect a true effect of the planned size or larger. +High power means you are less likely to miss real improvements; very low power leads to many inconclusive or misleading tests. + +In AB testing, power is typically set to 80% meaning that 8 out of 10 times, the test will detect the planned effect. + +### Statistical significance (threshold) +A label applied when results meet the predefined significance criterion, usually p < alpha. +Statistical significance indicates that the observed effect is unlikely to be due to chance alone under the null model, but it does not guarantee practical importance and says nothing about the size of the effect. + +**Example:** A 3 percent lift in conversion with p = 0.01 at alpha = 0.05 is statistically significant. + +--- + +## T + +### True effect +The true effect is the actual impact a treatment (e.g., new feature, UI change, algorithm tweak) has on a metric in the entire population, not just in your sample. +We never know the true effect. We can only estimate it. + +The true effect is what you would observe if you ran the experiment on every user forever, under ideal conditions, with perfect measurement. + +But since we can't do that, we: +- Run the test on a sample of the population, and +- Use statistics to estimate the true effect. + +**Example:** +You run an A/B test and observe that Treatment increased conversion rate by +1.2%, with a p-value of 0.03. +That +1.2% is your [observed effect](#observed-effect). + +But the true effect might be different, it could be +0.5% or +1.7%. 
+ +The [confidence interval](#confidence-interval) tells you the range where the true effect likely lies (e.g., [0.2%, 2.2%]). + +An experiment gives you a statistical estimate (with uncertainty) of that true effect. + +### Two-tailed analysis +A statistical test that checks for an effect in either direction: whether the treatment is better or worse than the control. +It evaluates any significant difference in both directions, not just improvement. + +Two-tailed tests are more conservative than [one-tailed](#one-tailed-analysis) tests: they require stronger evidence to detect an effect, +but they protect against surprises in both directions. + +A two-tailed analysis makes sense if: +- You care about any meaningful change, positive or negative +- You want to detect both improvements and regressions +- A negative result would cause a different decision (e.g. rollback, investigation) + +This applies in scenarios where risk of harm matters, or where learning about both upside and downside is important. + +**Example:** You test a new signup flow. If it improves conversion, you’ll ship. +But if it hurts conversion, you want to detect that too, so you use a two-tailed test. + +### Twyman’s law +A heuristic that states “the more surprising a result looks, the more likely it is to be wrong or misleading”. +Twyman’s law reminds teams to double-check interesting or extreme results for errors, bias or artefacts. + +**Example:** Discovering a 50 percent uplift from a minor colour change should trigger strong suspicion and careful validation. + +### Type I error +Incorrectly rejecting the null hypothesis when it is actually true; also called a [false positive](#false-positive). +Type I errors lead to rolling out ineffective changes based on spurious results. + +### Type II error +Failing to reject the null hypothesis when it is false; also called a [false negative](#false-negative).
+Type II errors cause teams to miss out on beneficial changes that would have helped users or the business. + +--- + +## V + +### Variance +A measure of spread that averages the squared distance between each value and the mean. +Variance is the foundation for [standard deviation](#standard-deviation) and influences sample size and sensitivity. + +**Example:** A metric with low variance has values clustered tightly around the mean; high variance means values are more scattered. + +### Variance reduction +Any method that reduces the variance of metric estimates without changing their meaning, such as using pre-experiment covariates or better metric definitions. +Lower variance improves power and reduces how long experiments need to run. + +**Example:** Applying CUPED to revenue per user so that differences between variants become clearer with the same traffic. + +--- + +## Z + +### Z-score +A standardised value expressing how many standard deviations a data point or effect is away from the mean or from zero. +Z-scores provide a common scale for test statistics and link directly to p-values in many tests. + +**Example:** A z-score of 2 corresponds to an effect about two standard deviations above zero, which roughly maps to p ≈ 0.045 in a two-tailed test. \ No newline at end of file diff --git a/docs/platform-release-notes/2024/12.mdx b/docs/platform-release-notes/2024/12.mdx index 3ff809a1..fab9467d 100644 --- a/docs/platform-release-notes/2024/12.mdx +++ b/docs/platform-release-notes/2024/12.mdx @@ -19,15 +19,15 @@ and faster to create and monitor experiments. the creation process. For more details about the new GST improvement check our -[documentation](/docs/web-console-docs/setting-up-a-gst-experiment). +[documentation](/docs/web-console-docs/experiments/setting-up-a-gst-experiment). 
## Fields Locking in templates We improved our experiment and feature templates by making it possible to lock some fields so they are no longer editable when creating experiments or features. This ensures standardized setups and reduces the risk of -errors. For more information on creating and using templates you can read -our [documentation](/docs/web-console-docs/templates). +errors. For more information on creating and using templates, you can read +our [documentation](/docs/web-console-docs/experiments/templates). ## Default platform settings @@ -36,7 +36,7 @@ fields of your experiment setup. Click on 'Settings > Platform settings' to open the default analysis setup. - Choose which analysis type - ([Group Sequential or Fixed Horizon](/docs/web-console-docs/types-of-analysis)) + ([Group Sequential or Fixed Horizon](/docs/web-console-docs/experiments/overview#analysis-methods)) is the default for all new experiments. - Specify minimum and default values for confidence and power levels - Specify a default MDE for Group Sequential and Fixed Horizon diff --git a/docs/platform-release-notes/2025/02.mdx b/docs/platform-release-notes/2025/02.mdx index d991d4d9..0b17e235 100644 --- a/docs/platform-release-notes/2025/02.mdx +++ b/docs/platform-release-notes/2025/02.mdx @@ -10,7 +10,7 @@ designed to help you track your experimentation program more effectively. ## Beta Feature: Velocity Report We're introducing the -[Velocity Report (Beta)](/docs/web-console-docs/Experiment-reports) to +[Velocity Report (Beta)](/docs/web-console-docs/experiments/Experiment-reports) to give teams better visibility into experimentation speed and execution trends. 
This report helps you understand how efficiently experiments move through different stages, providing insights to optimize your diff --git a/docs/platform-release-notes/2025/03.mdx b/docs/platform-release-notes/2025/03.mdx index 4dac0e23..3d2040f4 100644 --- a/docs/platform-release-notes/2025/03.mdx +++ b/docs/platform-release-notes/2025/03.mdx @@ -30,7 +30,7 @@ key updates: After the Velocity Report, we are also excited to launch the second part of our reports, the -[Decisions Reports (Beta)](/docs/web-console-docs/experiment-reports/#decisions-overview). +[Decisions Reports (Beta)](/docs/web-console-docs/experiments/experiment-reports/#decisions-overview). While the **Velocity Report** focuses on the experiments you and your colleagues run, the **Decisions Report** focuses on the choices you make as a result of those experiments. Those decisions include ***Full On***, diff --git a/docs/platform-release-notes/2025/04.mdx b/docs/platform-release-notes/2025/04.mdx index 51c1b194..98ad28eb 100644 --- a/docs/platform-release-notes/2025/04.mdx +++ b/docs/platform-release-notes/2025/04.mdx @@ -18,10 +18,10 @@ In this release, we focused mainly on improvements to the **Velocity** and **Dec ## Decisions Report -Earlier this month we launched the new [Decisions Reports (Beta)](/docs/web-console-docs/experiment-reports/#decisions-overview). In this release we are introducing a few improvements to provide better insights into each decision. +Earlier this month we launched the new [Decisions Reports (Beta)](/docs/web-console-docs/experiments/experiment-reports/#decisions-overview). In this release we are introducing a few improvements to provide better insights into each decision. -- **Secondary & Guardrail metrics:** Like with the primary metric, you can now see the experiment results on the secondary and guardrail metrics. 
This helps understand the rationale behind each decision.** -- **Health checks:** We are now surfacing any [health check violations](/docs/web-console-docs/Experiment-health-checks) in the reports so you can get better insights into the quality of each decision. +- **Secondary & Guardrail metrics:** Like with the primary metric, you can now see the experiment results on the secondary and guardrail metrics. This helps understand the rationale behind each decision. +- **Health checks:** We are now surfacing any [health check violations](/docs/web-console-docs/experiments/Experiment-health-checks) in the reports so you can get better insights into the quality of each decision. --- diff --git a/docs/platform-release-notes/2025/06.mdx b/docs/platform-release-notes/2025/06.mdx index dc6e95c2..04bc46b3 100644 --- a/docs/platform-release-notes/2025/06.mdx +++ b/docs/platform-release-notes/2025/06.mdx @@ -13,7 +13,8 @@ Teams are now much more than just metadata. You can properly define and manage t - Assigning users to teams - Structuring teams hierarchically (e.g. parent and child teams) -This is the first step toward upcoming improvements around ownership, permissions, and team-level collaboration. Go to `Settings > Teams` to get started with managing your team and inviting your team members. If you already have teams defined in ABsmartly, you can simply move them to the right place in your org structure. Check our [wiki page](/docs/web-console-docs/creating-and-managing-teams) for more information on creating and managing teams in ABsmartly. +This is the first step toward upcoming improvements around ownership, permissions, and team-level collaboration. Go to `Settings > Teams` to get started with managing your team and inviting your team members. If you already have teams defined in ABsmartly, you can simply move them to the right place in your org structure. 
+Check our [wiki page](/docs/web-console-docs/users-teams-permissions/teams) for more information on creating and managing teams in ABsmartly. --- diff --git a/docs/platform-release-notes/2025/11.mdx b/docs/platform-release-notes/2025/11.mdx index d7fe7571..f3d0dde9 100644 --- a/docs/platform-release-notes/2025/11.mdx +++ b/docs/platform-release-notes/2025/11.mdx @@ -13,7 +13,8 @@ This new release is packed with new features and improvements to help you manage We made it easier to create simple experiments using our new **ABsmartly LaunchPad**. This Chrome Extension makes it possible to create experiments using our new Visual Editor and without the need for developers. While the Chrome Extension is still in Beta, we encourage you to give it a try and give us feedback. -Before you get started see our guides on [getting started with the LaunchPad](/docs/LaunchPad%20Browser%20Extension/getting-started-with-the-launchpad) and [creating your first experiment](/docs/LaunchPad%20Browser%20Extension/creating-an-experiment-with-the-launchpad). +Before you get started see our guides on [getting started with the LaunchPad](/docs/web-console-docs/launchpad-browser-extension/getting-started) +and [creating your first experiment](/docs/web-console-docs/launchpad-browser-extension/creating-an-experiment-with-the-launchpad). This first release is only the first step as we have big plans for the **ABsmartly LaunchPad** in 2026. @@ -32,7 +33,7 @@ This feature makes it easier for teams to collaborate and use assets without hav /> This was the last building block in our Ownership & Permission model. Some changes will be necessary on your side before you can fully make use of this new model. -We encourage you to read our [how-to guide](/docs/web-console-docs/ownership-and-permissions) and to reach out to us if necessary so we can jump on a call and guide you through those changes. 
+We encourage you to read our [how-to guide](/docs/web-console-docs/users-teams-permissions/ownership-and-permissions) and to reach out to us if necessary so we can jump on a call and guide you through those changes. We want to make things better and future-proof for you. We realise that we introduced a lot of changes but unless you take action on all the action items described above then nothing should change (except for the Team Ownership which is now the default but which can be disabled in your settings) and you will be able to keep using the platform like you use it today. diff --git a/docs/platform-release-notes/2025/12.mdx b/docs/platform-release-notes/2025/12.mdx new file mode 100644 index 00000000..ab37a0df --- /dev/null +++ b/docs/platform-release-notes/2025/12.mdx @@ -0,0 +1,126 @@ +import Image from "../../../src/components/Image"; + +# December 2025 + +## Overview +This release is all about **Metrics**. As part of our broader initiative to improve **metric governance**, +we’ve introduced powerful new capabilities to help you better manage, understand, and select the right metrics for your experiments. + +--- + +## General improvements + +We've made some general improvements to Metrics that you will see across the platform. + +### New **Metric Categories** type +We've added a new configuration type that helps categorise and group metrics. Those new metric categories will make it easier to find the right metrics when creating an experiment. + +While the categories should reflect your own needs, here is a list of possible metric categories you can add to your ABsmartly: + +- `Conversion`: Measures whether users complete a desired action. +- `Revenue`: Captures direct monetary impact. +- `Engagement`: Reflects how actively users interact with the product. +- `Retention`: Shows whether users come back or continue using the product over time. +- `Performance`: Measures speed and responsiveness, such as load time or latency. 
+- `Reliability`: Tracks stability and correctness, including errors, failures, or availability. +- `Quality`: Represents outcome quality or user experience signals like cancellations, refunds, or unsuccessful outcomes. + +### New metric's metadata fields +We've added new metadata fields to metrics that help with discoverability and filtering across the platform. This includes: + +- **Unit type**: This is the list of Unit type(s) for which this metric is computed. Setting the correct Unit type(s) will help experimenters choose the right metric for their experiments. (e.g. user_id, device_id) +- **Application**: This is the list of Application(s) where this metric make sense. For example, an `app_crashes` metrics only makes sense for experimemts running on app platforms. +- **Metric category**: This is the category the metric belongs to. This will make your metric more discoverable. See above. + +All those fields are optional, but we recommend you update your existing metrics as this will improve general discoverability of your metrics. + +### Metric View page +You can now click on the name of any metric across the platform to open the metric's **view page**. +This page will give you a readable overview of the metric and will be the new entry point for managing metrics (editing and creating new versions) as well as many new upcoming features. + +--- + +## Improved Metric Discoverability + +We’ve made it easier to find, understand, and select the right metrics when creating your experiments/templates/features. + +Selecting a primary metric + +### Usability improvement +We totally redesigned the metric selection step of the experiment setup. The goal of the new UI is to make it easier to find and add the right metrics for your experiments. 
+ +### Smarter metric selection in experiments +The metric selection step will show by default the most relevant metrics based on the chosen **unit type** and **application** (make sure to update your metric metadata to get the most out of this new feature). + +Metrics can now also easily be searched by name, tags, owners, etc so you don't have to scroll through your long list of existing metrics to find what you are looking for. + +### Usage insights +While adding metrics to your experiments/templates/features, you can now see how often a metric has been used in past experiments to help you assess its relevance and importance. + +:::tip +To get the most out of these improvements, we recommend reviewing your existing metrics, filling in missing metadata, and adding clear descriptions where needed. +::: + +--- + +## Metric Versioning (Foundations) + +A key part of **metric governance** is **version control**, ensuring that metric definitions are transparent, traceable, and stable over time. +This release lays the groundwork for more robust version management in the future. + +Metric versioning is a critical part of metric governance as it allows for a metric to evolve overtime without risking impacting previous experiments and decisions made using an older version of that metric. + +Metric view page + +### Metric versioning 1.0 +It is now possible for metric owners to create a new version of an existing metric. +This can be done, for example, when the definition of a metric change. + +- Creating a new version of a metric will not impact past and running experiments/features which are using a previous version of that metric. +- Only the latest version of a metric will be discoverable and can be added to new experiments. Experimenters will only be able to see the latest version of each metric. +- Experiments/Features cannot be started when they use an outdated version of a metric. 
Experimenters will be asked to update to the latest version before they can start the experiment/feature. + +### Edit vs New Version +With the launch of metric versioning, some fields can be edited in the current version of the metric while others will require a new version to be created. + +- **Editable fields**: Fields like Description, Tags, Category, Applications, Tracking units can safely be updated without changing the definition of a metric. +- **Non-editable fields**: All other fields which might have an impact on how the metric is computed or how the result might be interpreted cannot be edited and a new version of the metric will need to be created to be able to change them. + +As a metric owner, you will be able to **edit** and **create new version** from the new Metric view page. + +:::caution +If you are using our API to edit your metrics, you will need to update your script as you will no longer be able to edit all metric fields using the edit endpoint. + +A new endpoint for creating new metric versions is now available if needed. +::: + +--- + +## What’s Next + +We’re continuing our focus on **general metric improvements** and **metric governance** in the coming sprints. +Upcoming improvements include: + +- **CUPED support** +- **Metric lifecycle** +- **Metric approval workflows** +- **Metric usage overviews and reporting** + +These updates are part of our broader effort to improve trust, transparency, and governance around metrics. + +--- + +## Questions or Feedback? +As always, if you have questions about this release or want to talk about how to get more out of your metrics, reach out to us anytime.
+ + diff --git a/docs/platform-release-notes/_category_ copy.json b/docs/platform-release-notes/_category_ copy.json new file mode 100644 index 00000000..0e452983 --- /dev/null +++ b/docs/platform-release-notes/_category_ copy.json @@ -0,0 +1,6 @@ +{ + "position": 50, + "collapsible": true, + "collapsed": true, + "label": "Release Notes" +} diff --git a/docs/platform-release-notes/_category_.json b/docs/platform-release-notes/_category_.json index 16bfa566..ea320069 100644 --- a/docs/platform-release-notes/_category_.json +++ b/docs/platform-release-notes/_category_.json @@ -2,5 +2,5 @@ "position": 7, "collapsible": true, "collapsed": true, - "label": "Platform Release Notes" + "label": "Release Notes" } diff --git a/docs/web-console-docs/Configuration/Applications.mdx b/docs/web-console-docs/Configuration/Applications.mdx new file mode 100644 index 00000000..89aa5b85 --- /dev/null +++ b/docs/web-console-docs/Configuration/Applications.mdx @@ -0,0 +1,22 @@ +--- +sidebar_position: 1 +--- + +import Image from "../../../src/components/Image"; + +# Applications + +Applications are the platforms where you can run experiments using ABsmartly. When +creating an experiment, you will be asked to name all applications where the +experiment will be run. + +Applications page + +:::info +To be able to experiment on these applications, you will first need to install the relevant ABsmartly's SDK. +::: \ No newline at end of file diff --git a/docs/web-console-docs/Configuration/Units.mdx b/docs/web-console-docs/Configuration/Units.mdx new file mode 100644 index 00000000..15d02970 --- /dev/null +++ b/docs/web-console-docs/Configuration/Units.mdx @@ -0,0 +1,37 @@ +--- +sidebar_position: 2 +--- + +import Image from "../../../src/components/Image"; + +# Units + +Units are the unique identifiers for the visitors, they are used to track those visitors over time and across your [applications](applications). +Units are also referred to as tracking identifiers. 
+ +For experiments running across different platforms (iOS, Android, web, email, etc.) the unit should be known across all the platforms. +Most likely the authenticated user's user-id. + +For experiments running in apps or parts of the website where the user is not authenticated, you might want to create a unit by device. +anonymous-id or device-id are common names for those. + +For experiments running in email newsletters, you could even have a unit based on the user's email address, +but if you are interested in collecting metrics tracked on the website that the newsletter is linking to, +it's better to use the same unit that is being used for the experiments running on the website. + +Have a look at the [Tracking Unit](/docs/web-console-docs/experiments/creating-an-experiment#tracking-unit) +section of the **Creating an Experiment** docs for more information. + +Units page + +:::info +Not all units are equal, some, like `user_id`, allow for tracking visitors across sessions and even platforms. +Others like `anonymous_id` are much more transient. This means that the same physical user could potentially have several `anonymous_id`, (and as a result see a different variant of your experiment). +This is the case for example when they use several devices or if they clear their cookies between different sessions. +While an `anonymous_id` identifier is likely always available, the `user_id` might only be available in parts of the applications where the visitor is logged in. 
+::: \ No newline at end of file diff --git a/docs/web-console-docs/Configuration/_category_.json b/docs/web-console-docs/Configuration/_category_.json new file mode 100644 index 00000000..2f6a662a --- /dev/null +++ b/docs/web-console-docs/Configuration/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 6, + "collapsible": true, + "collapsed": true, + "label": "Configuration" +} \ No newline at end of file diff --git a/docs/web-console-docs/settings.mdx b/docs/web-console-docs/Configuration/settings.mdx similarity index 87% rename from docs/web-console-docs/settings.mdx rename to docs/web-console-docs/Configuration/settings.mdx index f0e2929c..91540175 100644 --- a/docs/web-console-docs/settings.mdx +++ b/docs/web-console-docs/Configuration/settings.mdx @@ -2,18 +2,12 @@ sidebar_position: 6 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # Advanced Settings The dashboard settings page is your place to set up parts of your experiments. -## Applications - -Here you can declare all of the platforms where your experiments may run. When -creating an experiment, you will be asked to name all applications where the -experiment will be run. - ## API Keys API keys are used to allow access to the API from your applications. @@ -44,23 +38,6 @@ Development environments run both `production` and `development` experiments, bu `development` experiments only collect data from `development` environments, and `production` experiments only collect data from `production` environments. -## Goals - -Goals are the names given to processes that you will later track in your code. - -The goal name will be used in your code, so we recommend using a keyword-like -or kebab-case name. For example, `newsletter_subscription`, `cpu_load_time` or -`bookings`. - -## Metrics - -Metrics are the parameters that will be used to track your goals. 
They can -give you insights about your business, your users' behavior, the performance -of your system and more! - -For more information on creating a Metric, see the -[Creating a Metric section of the Getting Started guide](/docs/web-console-docs/tutorial#creating-a-metric)! - ## Roles The Roles section allows you to customize and give specific permissions to the users of your @@ -111,29 +88,6 @@ through your goals and experiments to find a specific one. We recommend prefixing each tag with the tag's type. For example, `location:Header`, `stack:Backend` or `psychological:Trust`. -## Teams - -In the teams section, you can declare the teams that will be running -experiments. This will allow you to assign and search for experiments by a -specific team. - -## Units - -Units are the unique identifiers that are going to be used to generate a variant. -For experiments running across different platforms (iOS, Android, web, email, etc.) the -unit should be known across all the platforms. Most likely the authenticated -user's `user-id`. For experiments running in apps or parts of the website where -the user is not authenticated, you might want to create a unit by device. -`anonymous-id` or `device-id` are common names for those. - -For experiments running in email newsletters, you could even have a unit based -on the user’s email address, but if you are interested in collecting metrics -tracked in the website that the newsletter is linking to, it’s better to use -the same unit that is being used for the experiments running in the website. - -Have a look at the [Tracking Unit](/docs/web-console-docs/creating-an-experiment#tracking-unit) -section of the **Creating an Experiment** docs for more information. - ## Users In Users, you can create and give permissions to the users who should have @@ -149,7 +103,7 @@ Webhooks can be used for custom integrations. For example: - Synchronizing the status of a Jira ticket with the status of the respective experiment. 
-Have a look at our [Slack integration guide](/docs/examples/slack-integration) for an example of how to use Webhooks, +Have a look at our [Slack integration guide](/docs/APIs-and-SDKs/Web-Console-API/examples/slack-integration) for an example of how to use Webhooks, or checkout our [example Github repository](https://github.com/absmartly/examples/tree/main/Javascript/slack-integration) for a more comprehensive code example! diff --git a/docs/web-console-docs/Events/_category_.json b/docs/web-console-docs/Events/_category_.json new file mode 100644 index 00000000..8b2e5ce3 --- /dev/null +++ b/docs/web-console-docs/Events/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 5, + "collapsible": true, + "collapsed": true, + "label": "Events" +} \ No newline at end of file diff --git a/docs/web-console-docs/Events/downloading-events.mdx b/docs/web-console-docs/Events/downloading-events.mdx new file mode 100644 index 00000000..4eb658e2 --- /dev/null +++ b/docs/web-console-docs/Events/downloading-events.mdx @@ -0,0 +1,84 @@ +--- +sidebar_position: 5 +--- + +import Image from "../../../src/components/Image/" + + +# Downloading Events + +Raw event data can be downloaded directly from an experiment's overview page or from the [Events Page](the-events-page). + +## From the Experiment's overview + +Downloading events data directly from an experiment makes it easy to download all experiment related data in a single file directly into your computer. + +To download an experiment's raw event data + +- Navigate to an experiment's overview. +- Click the ... in the top right. +- Select `Export Data` from the dropdown. This option will only be visible if you have the correct permissions to download raw data. +- An export request is made a processed asynchronously. +- When the export file is ready, a notification will show on the report's activity tab. +- Click on the link to download the raw data file. + +::: note +The link to download the file is only valid for 30 days. 
A new request can be made if the link has expired. +::: + +## From the Event's page + +Downloading events from the Events page makes it possible to select which events will be downloaded and does not restrict events from a single experiment. + +### Exporting Events + +To set up a new export, click `Export` in the top-right hand corner. + +Export events page +Here you will be able to set up an export configuration that runs either once, +or recurrently. + +#### Configure Export + +The first step on the exporter form is to give your configuration a name and +to select how often you want it to run. This can be once, hourly, daily, +weekly or monthly. + +#### Filter Events + +As mentioned in the [Event Filters section](#event-filters), you can filter +your events to fine tune what kind of events are exported. The `Start At` +field is required and is the date and time of the earliest event that you want +to export. For recurring exports, after the first export, the `Start At` event +will be the next event after the last one that was previously exported. + +#### Configure Storage + +The `Configure Storage` section is where you will input the details of your +storage bucket. The fields are described in the following table: + +| Field Name | Description | +| --------------- | ----------------------------------------------------------------------------- | +| **Bucket** | The name of the object storage bucket to export into. | +| **Prefix** | The prefix of the exported filename, which can include / to denote "sub-folders". | +| **Access Key ID** | The access key id of for the object storage bucket. | +| **Secret Key** | The secret access key for the object storage bucket. | +| **Endpoint** | The S3-compatible endpoint to connect to the object storage bucket. **Examples:**
AWS: https://s3.amazonaws.com
GCP: https://storage.googleapis.com
| +| **Format** | The format of the exported data. CSV and TSV include a header row with column names. JSON is a newline-delimited JSON file. Parquet is a very efficient column oriented storage format.| +| **Compression** | The compression method to use. GZIP is widely supported and provides good compression. ZSTD is similar to GZIP in compression level but faster and less widely supported. LZ4 is much faster but provides slightly less compression. None is no compression. | + +:::info Interoperability with GCP to S3 +1. Go to the [Cloud Storage Settings page](http://console.cloud.google.com/storage/settings/) in the [Google Cloud Platform Console](https://console.cloud.google.com/). +2. In Settings, select **Interoperability**. +3. If interoperability has not been set up yet, click **Enable Interoperability Access**. +4. Click **Create new key**. +::: + +Once all of these fields have been filled, you must use the `Test Configuration` +button to make sure that the fields have been filled correctly. If the test is +successful, you can click the `Export Events` button to start exporting! diff --git a/docs/web-console-docs/Events/exposure-events.mdx b/docs/web-console-docs/Events/exposure-events.mdx new file mode 100644 index 00000000..d80eb6b9 --- /dev/null +++ b/docs/web-console-docs/Events/exposure-events.mdx @@ -0,0 +1,123 @@ +--- +sidebar_position: 1 +--- + +import Image from "../../../src/components/Image/" + +# Exposure Events + +## The assignment moment + +Exposure events happen every time the `treatment` function is called to check which variant of an experiment to show a visitor. +This is how it might look in case of an A/B/C experiment. 
+ +```javascript + +context.ready().then(function () { + var exp_assignment = context.treatment("experiment_name"); + if (exp_assignment == 1) { + // insert code to show for Variant 1 + } else if (exp_assignment == 2) { + // insert code to show for Variant 2 + } else { + // insert the Control/Base code + } +}); + +``` + +This `treatment` function call will return the variant assignment. `0` for base, `1` for variant 1 or `2` for variant 2. + +:::info +For a given user identifier, the **treatment** call will always return the same value, making the assignment sticky. +This is a very important feature as this means the same visitor will always be shown the same variant. +::: + +Before sending exposure events, make sure to correctly initialise and enrich the ABsmartly's context with the [visitor's identity](visitors-identity). + +## The events page + +As soon as your code is deployed and visitors enter this part of your product, exposure events will start being appearing in the ABsmartly's [events' page](the-events-page). + +Exposure events on the events page + + +Monitoring events on the events' page is a great way to ensure your tracking is set up properly. + +:::tip +You can even start monitoring the events page before the experiment is started. +::: + +## Understanding exposure events data + +From the events' page, you can inspect any exposure event raw json data by clicking the event on the event's page. 
+ +```ts + +{ + "event_type": "exposure", + "unit_uid": "of2dhFlqHRXpW8iQG9mepw", + "unit_uid_hex": "a1fd9d84596a1d15e95bc8901bd99ea7", + "unit_type": "absId", + "unit_type_id": 42, + "agent": "absmartly-javascript-sdk", + "application": "absmartly.com", + "application_id": 39, + "environment": "Prod", + "environment_id": 3, + "environment_type": "production", + "event_at": 1762888120666, + "unit_attributes": { + "application": "absmartly.com", + "user_agent": { + "device_type": "Mobile", + "crawler": "no", + "value": "Mozilla/5.0 (iPhone; CPU iPhone OS 17_2_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Mobile/15E148 Safari/604.1", + "platform": "iOS", + "browser": "Safari", + "browser_type": "Browser" + }, + "__crawler": false + }, + "experiment_id": 3214, + "experiment_name": "absmartly_site_aa", + "experiment_iteration": 0, + "variant": 0, + "flags": 1, + "properties": null +} + +``` + +Below is an overview of some of the data you will find in the exposure event's raw data. + +| Field | Description | +|-------|--------------| +| **event_type** | This indicates the sort of event. In this case, `exposure`. | +| **unit_type**| The sort of unit used to identify the visitor. | +| **unit_uid**| The visitor's unique identifier for this unit type. One event is sent for each register identifier. | +| **application**| Indicates the application where this event comes from. | +| **event_at**| The event's timestamp. | +| **unit_attributes** | The list of visitor's attributes registered for this event. | +| **experiment_id**| The experiment's id. | +| **experiment_name**| The experiment's name. | +| **variant**| The variant's assignment'. 0 for base. 1 for variant 1. etc. 
| + +## Special attributes + + + +## Events info & warning + +Exposure events can be labelled with extra information or warning + +| Label | Type | Description | +|-------|--------------|-------------| +| **An experiment with this name was not running at the time of ingestion** | warning | Indicates that the underlying experiment was not running when this event was triggered . | +| **The experiment was full on at the time of ingestion**| info | The experiment related to this event is full on. It is good practice to clean up full on experiments. | \ No newline at end of file diff --git a/docs/web-console-docs/Events/goal-events.mdx b/docs/web-console-docs/Events/goal-events.mdx new file mode 100644 index 00000000..9d63ce0e --- /dev/null +++ b/docs/web-console-docs/Events/goal-events.mdx @@ -0,0 +1,95 @@ +--- +sidebar_position: 2 +--- + +import Image from "../../../src/components/Image/" + + +# Goal Events + +## Capturing user actions + +Goal events are sent by the SDKs when the `track` function is called. +Those events capture visitor actions (`page view`, `button clicked`, `purchase`, etc) with optional contextual data like `price`, `quantity`, and `product_id`, etc. + +```javascript + +// Triggered when a visitor buys a product +context.track("purchase", { + price: 1000, + currency: 'euro', + order_number: '0000982532', + product_id: "ABC123", + category_id: 'ZYZ123' +}); + +``` + +Before sending goal events, make sure to correctly initialise the ABsmartly's context and enrich it with the visitor's attributes. +See the [visitor's identity page](visitors-identity) for more details. + +## The events page + +As soon as your code is deployed and visitors trigger the action, goal events will start being appearing in the ABsmartly's [events' page](the-events-page). + +Goal events on the events page + +You can use this page while debugging and to ensure that events are correctly been triggered when users perform a specific action on your application. 
+ +## Understanding goal events + +You can click any event on the events' page to inspect the event. + +```ts +{ + "event_type": "goal", + "unit_uid": "LTSLmGJb-oGQVJhxElUwtQ", + "unit_uid_hex": "2d348b98625bfa8190549871125530b5", + "unit_type": "absId", + "unit_type_id": 42, + "agent": "absmartly-javascript-sdk", + "application": "absmartly.com", + "application_id": 39, + "environment": "Prod", + "environment_id": 3, + "environment_type": "production", + "event_at": 1763124336749, + "goal_id": 106, + "goal_name": "purchase", + "properties": { + "price": 1000, + "currency": "euro", + "order_number": "0000982532", + "product_id": "ABC123", + "category_id": "ZYZ123" + }, +} +``` + +Below is an overview of some of the data you will find in the exposure event's raw data. + +| Field | Description | +|-------|--------------| +| **event_type** | This indicates the sort of event. In this case, `goal`. | +| **unit_type**| The sort of unit used to identify the visitor. | +| **unit_uid**| The visitor's unique identifier for this unit type. One event is sent for each register identifier. | +| **application**| Indicates the application where this event comes from. | +| **event_at**| The event's timestamp. | +| **unit_attributes** | The list of visitor's attributes registered for this event. | +| **goal_id**| The goal's id. | +| **goal_name**| The goals's name. | +| **properties**| The list of all properties associated with this goal. Those can used as part of the metric computation. | + + +## Events info & warning + +Goal events can be labelled with extra information or warning + +| Label | Type | Description | +|-------|--------------|-------------| +| **A goal with this name was not known at the time of ingestion** | warning | Indicates that the goal does not exist in ABsmartly. 
| diff --git a/docs/web-console-docs/Events/the-events-page.mdx b/docs/web-console-docs/Events/the-events-page.mdx new file mode 100644 index 00000000..d07bfe37 --- /dev/null +++ b/docs/web-console-docs/Events/the-events-page.mdx @@ -0,0 +1,97 @@ +--- +sidebar_position: 4 +--- + +import Image from "../../../src/components/Image/" + + +# Events Page + +The events page is a great tool for debugging and inspecting goals and exposure events. +To access it, select `Events` from the sidebar. + +## The Events List + +The events page + +When opening the events page, you will be presented with a graph of your +incoming events and a list of the most recent ones. Events are split into +two categories - `Exposures` and `Goals`. + +### Exposure Events + +[Exposure events](exposure-events) are sent by the SDKs when the `treatment` function is called. +The treatment function asks our platform which variant a participant should +see and returns a number between `0` and `3`. + +### Goal Events + +[Goal events](goal-events) are sent by the SDKs when the `track` function is called, and they +include any properties that were passed along with it. + +### Raw Event JSON + +Clicking on a single event will bring up a dialog box with the event's raw JSON data: + +Goal event detail + +Check the [Exposure events](exposure-events) and [Goal events](goal-events) pages for more details on those events + +## Events Filters + +For debugging it can be useful to filter the incoming events to narrow them +down to the specific events that you are looking for. + +The available filters are as follows: + +### Application + +Select the application(s) that you want to see events from. Applications are +created and edited in the [Dashboard Settings](/docs/web-console-docs/configuration/settings). + +### Unit Type + +Select which unit types you want to see events from. Common unit types are `user_id` +or `anonymous_id`, but they could be anything depending on your setup. 
Unit +Types can be created in the [Dashboard Settings](/docs/web-console-docs/configuration/settings#units). + +### Event Type + +Filter your events to only show `exposure` events or `goal` events, as +described above. This filter can also be preselected by choosing a type when +clicking on the `Events` sidebar navigation. + +### Event Name + +The event name can be extremely useful for debugging. For exposure events, the +event name will be the name of the experiment that was exposed. For goal events, +the event name will be the name of the goal that was tracked. + +### Environment Type + +Choose whether to see events from either only Development environments or only +Production environments. Both types of environment can be set up in the +[Dashboard Settings](/docs/web-console-docs/configuration/settings#environments). + +### Unit UID + +The Unit UID is the most specific form of filtering in the events page. As +mentioned above, each event will be sent with a unique unit - often `user_id` +or `anonymous_id`. These values, when passed into the SDKs, are hashed and +sent to our platform in an encrypted form. If you only want to see values that +have been sent by yourself, you can copy and paste your Unit UID hash into +this box and only your own events will appear in the list. + +## Downloading Events + +See the [downloading events page](downloading-events) for a complete guide to downloading raw events. \ No newline at end of file diff --git a/docs/web-console-docs/Events/visitors-identity.mdx b/docs/web-console-docs/Events/visitors-identity.mdx new file mode 100644 index 00000000..be44d77b --- /dev/null +++ b/docs/web-console-docs/Events/visitors-identity.mdx @@ -0,0 +1,65 @@ +--- +sidebar_position: 3 +--- + +# Visitors Identity + +## What is it? + +The Visitors Identity is used to uniquely identify and track visitors over time and across your products. +There are 2 parts to it, the unique identifiers and the attributes. 
The Visitors Identity is used
An experiment targeting only power users) or + for segmenting data on your results page (i.e. Explore experiment results per device type). + +```javascript +context.attribute("user_agent", navigator.userAgent); + +context.attributes({ + power_user: user.isPowerUser ? "yes" : "no", + user_agent: navigator.userAgent, + country: headers["HTTP_CF_IPCOUNTRY"], + language: headers["Accept-Language"], + channel: query_params.utm_medium, +}); +``` \ No newline at end of file diff --git a/docs/web-console-docs/Understanding-experimentation-metrics.mdx b/docs/web-console-docs/Understanding-experimentation-metrics.mdx deleted file mode 100644 index dbb4f7e6..00000000 --- a/docs/web-console-docs/Understanding-experimentation-metrics.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -sidebar_position: 10 ---- - -# Understanding Experimentation Metrics - -Experimentation metrics can be described using many attributes, often combining those attributes together. -In this article we try to explain the most important attributes and what they mean in the context of experimentation. -## Role -In ABsmartly and many other experimentation platforms, metrics are often described as **primary**, **secondary**, **guardrail**, **exploratory**, those -attributes describe the role that the metric plays in the experiment. -### Primary metric -In an experiment the **Primary** metric is the single most important measure used to determine whether the tested change achieves its desired outcome and whether or not the hypothesis is validated or rejected. -It reflects the experiment's primary objective and directly aligns with the business’s strategic goals. -The primary metric is the metric used to inform the experiment design regarding defining the minimum detectable effect (MDE) and the sample size (to ensure sufficient power to detect a meaningful change). 
- -Examples: -- `revenue_per_visitor` -- `conversion_rate` -- `retention_rate` - -### Secondary metrics -**Secondary** metrics, while not the main decision-making criteria, play a big role in ensuring a comprehensive understanding of the experiment’s impact. -They provide additional context and insights beyond the primary metric and can help detect unintended side effects. - -Examples: -- `items_added_to_cart` -- `product_page_view` -- `banner_interaction` - -### Guardrail metrics -**Guardrail** metrics are safeguards used to monitor and ensure the health, stability, and overall integrity of the system during an experiment. -They do not measure the success of the primary business objectives but are critical for detecting unintended negative impacts on the business, user experience and/or operational performance. -Guardrail metrics act as early warning systems, identifying potential risks such as degraded performance, increased errors, or adverse user behavior before they escalate into larger problems. - -Examples: -- `errors` -- `app_crashes` -- `page_load_time` -- `support_tickets` - -### Exploratory metrics -In ABsmartly, **exploratory** metrics refers to metrics of interest not used in decision-making. -Exploratory metrics are often used in post-analysis and are a great source of insights on top of which new hypotheses can be built. Exploratory metrics should not be used to evaluate the experiment. -## Purpose -A metrics can be described as a **business** metric, a **behavioural** metric or an **operational** metrics. -Those attributes describe the purpose of the metric, what it is measuring.= -### Business -In experimentation, **business** metrics refers to metrics measuring the impact of a change on a business KPI. Business metrics are often used as primary and/or guardrail metrics. 
- -Examples: -- `revenue_per_visitor` -- `conversion_rate` -- `retention_rate` -- `calls_to_customer_support` - -### Behavioural -**Behavioural** metrics are metrics measuring the impact of a change on the visitor's behaviour. Behavioural metrics are usually measuring the direct impact of a change and as such have high sensitivity. Behavioural metrics are often used as secondary metrics. - -Examples: -- `items_added_to_wishlist` -- `clicks_on_banner` -- `product_page_views` - -### Operational -**Operational** metrics, also known as technical metrics, measure the impact of a change on system performance. Operational metrics can be used as guardrail metrics but also possibly as primary or secondary metrics depending on the goal of the experiment. - -Examples: -- `page_load_time` -- `app_crashes` -- `error_rate` - -## Data structure -All metrics are either **binomial** or **continuous**, this is a reference to how the underlying data is structured and measured. -### Binomial -**Binomial** metrics represent a binary outcome for each visitor in the experiment, where each instance falls into one of two categories (e.g., success/failure, yes/no, 0/1). -They are typically represented as a percentage (ie: 10% conversion rate), binomial metrics follow a normal distribution. Binomial metrics are easier to interpret and communicate. - -Examples: -- `conversion_rate` -- `click_through_rate` (ie: the percentage of users clicking on a link) -- `churn_rate` --` email_open_rate` - -### Continuous -**Continuous** metrics on the other hand can take on a wide range of values (either measured or counted). Continuous metrics often represent quantities or durations. Their underlying distribution varies depending on the data. Continuous metrics are more sensitive (they capture a wider range of data) and offer more insights but they can be heavily influenced by outliers and are harder to interpret. 
- -Examples: -- `time_on_page` -- `time_to_first_booking` -- `number_of_items_in_cart` -- `revenue_per_visitor` - -## Time horizon -Another aspect of experimentation metrics is their time horizon, typically metrics can be referred to as **short-term** or **long-term**. -### Short-term -**Short-term** metrics refer to metrics that measure immediate or near-term outcomes, typically during or shortly after the experiment. -They can typically be measured accurately in the experiment’s runtime and provide quick feedback on the effects of changes. - -Examples: -- `real_time_conversion_rate` (during the test) -- `time_spent_on_page` -- `click_through_rate` - -### Long-term -On the other hand, **long-term** metrics measure delayed outcomes which make it hard to measure during the runtime of an experiment. -Typically long-term metrics represent the strategic goals and align with the desired business outcomes. Using such a metric for decision making requires adapting the experiment design so it captures this long term impact. - -Examples: -- `true_conversion_rate` (after cancellation and returns have been processed) -- `customer_lifetime_value` -- `long_term_revenue` -- `retention_rate` (over 6 months or more) - -## Functionality -Finally metrics can also be described by how they operate in the context of the experiment. -### Proxy -**Proxy** metrics are indirect measures used to evaluate an outcome that cannot be measured directly (see for the example of long-term metrics above). In experimentation proxy metrics can be used as a replacement for the actual desired goal. There should be a strong correlation between the proxy and the actual goal and this should be validated frequently. - -Examples: -- `time_on_site` as a proxy for engagement -- `click_on_buy_button` as a proxy for conversion - -### Composite -**Composite** metrics combine multiple individual metrics into one measure to capture a nuanced view of success. 
They are often used strategically but can dilute sensitivity. -Examples: -- `Overall Evaluation Criterion` (OEC) as a weighted combinations of metrics like engagement, revenue, and satisfaction - - - diff --git a/docs/web-console-docs/Users-teams-Permissions/Roles.mdx b/docs/web-console-docs/Users-teams-Permissions/Roles.mdx new file mode 100644 index 00000000..72428442 --- /dev/null +++ b/docs/web-console-docs/Users-teams-Permissions/Roles.mdx @@ -0,0 +1,128 @@ +--- +sidebar_position: 4 +--- + +# Roles + +Roles define what a user can access and modify inside ABsmartly. +They are a key part of the ownership and governance model, making sure that teams can work independently while maintaining control and safety across the organisation. + +ABsmartly supports two types of roles: + +1. **Global roles** for managing global settings permissions. +2. **Team-based roles** for managing **Experiment Assets**. + +:::info +In ABsmartly Experiment Assets are assets created by teams on the day to day basis as part of their experimentation program. +Those Experiment Assets include **Experiments**, **Feature Flag**, **Metrics**, **Goals** and **Templates**. +Access to those Assets can be defined and granted at the global or the team level. +::: + +--- + +## Global roles + +Global roles, are roles granted to a user at the company level. +They provide users with : + +- Access to the platform. +- Ability to manage and customise the platform (create new Units, Applications, Environment). +- Advanced capabilities (manage API Keys, manage Users & Teams, etc). +- Global Access to Experiment Assets. + +:::info +While possible, it is not recommended to assign global read/write access to Experiment Assets like +**Experiments**, **Feature Flag**, **Metrics**, **Goals** and **Templates**. +It is easier and cleaner to set up proper governance when ownership and permissions of Experiment Assets is defined at the team level. 
+::: + +### Built-in vs Custom roles + +ABsmartly comes with 2 global built-in roles `Base User` and `FullAdmin` which cover most usecases but +custom roles can be created to address specific needs or organisational structure. + +#### The Base User role + +The `Base User` role grants users basic access to the platform. Those users can access the platform and have read-only access to all platform settings. +By default, this role does not provide any permissions to view or edit Experiment Assets like **Experiments**, **Feature Flag**, **Metrics**, **Goals** and **Templates**. +Access to those Experiment Assets will be granted to users as part of their team membership. + +#### The FullAdmin role + +The `FullAdmin` role provides read-write access to everything on the platform, including all Experiment Assets. +This role should only be granted to the person(s) managing the platform. + +#### Custom roles + +While the 2 roles mentioned above cover most usecases, your organisation might have specific needs. +In this case custom roles can be created and assigned to certain users as needed. + +For example, custom roles could be created for allowing some users to create and manage [Units](../configuration/units), +[Applications](../configuration/applications), API Keys, etc which otherwise can only be managed by the `FullAdmin`. + +### Assigning Global Roles + +`FullAdmin` users and other users with custom user management permissions can manage which user has which role at the global level. +This can be done directly by visiting the User's profile or by editing the user in the members' list of the Global Team (the top level team in your team structure). + +--- + +## Team-based roles + +Team-based roles define how a user can interact with Experiment Assets in that team scope. + +:::info +A team scope includes all its child-teams' scopes. 
This means that if a user can view experiments in a certain team,
+ + + + + + diff --git a/docs/web-console-docs/creating-and-managing-teams.mdx b/docs/web-console-docs/Users-teams-Permissions/Teams.mdx similarity index 95% rename from docs/web-console-docs/creating-and-managing-teams.mdx rename to docs/web-console-docs/Users-teams-Permissions/Teams.mdx index cf096ef2..dc147f59 100644 --- a/docs/web-console-docs/creating-and-managing-teams.mdx +++ b/docs/web-console-docs/Users-teams-Permissions/Teams.mdx @@ -1,13 +1,13 @@ --- -sidebar_position: 16 +sidebar_position: 3 --- -# Creating and managing teams +## Overview Manage your organisation's structure by creating teams, assigning users, and setting up hierarchical relationships between teams. Teams help isolate access to content and provide a more relevant experience for users in large organisations. -## Creating a team +## Create a team To create a new team: @@ -41,7 +41,6 @@ From a team's detail page: Users assigned to a team are also **implicitly members of all its parent teams**. This allows parent teams to automatically include members from all their child teams. -- Implicit members are shown with the label `Inherited member`. - You cannot remove implicit members from a parent team — you must remove them from their direct team instead. 
## Editing or archiving teams diff --git a/docs/web-console-docs/Users-teams-Permissions/_category_.json b/docs/web-console-docs/Users-teams-Permissions/_category_.json new file mode 100644 index 00000000..7f0a7ab3 --- /dev/null +++ b/docs/web-console-docs/Users-teams-Permissions/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 4, + "collapsible": true, + "collapsed": true, + "label": "Users, Teams & Permissions" +} \ No newline at end of file diff --git a/docs/web-console-docs/ownership-and-permissions.mdx b/docs/web-console-docs/Users-teams-Permissions/ownership-and-permissions.mdx similarity index 96% rename from docs/web-console-docs/ownership-and-permissions.mdx rename to docs/web-console-docs/Users-teams-Permissions/ownership-and-permissions.mdx index c7e5f73b..db1e56f4 100644 --- a/docs/web-console-docs/ownership-and-permissions.mdx +++ b/docs/web-console-docs/Users-teams-Permissions/ownership-and-permissions.mdx @@ -1,21 +1,27 @@ --- -sidebar_position: 17 +sidebar_position: 5 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; -# How to make use of the ownership and permission model +# The Ownership Model Over the last few months we incrementally released many new features related to teams, ownership & collaboration aimed at improving how teams are experimenting and collaborating with each other. In this document we will discuss those changes and how you can start making use of them. +Experiment Assets in scope: **Experiments**, **Features**, **Templates**, **Metrics** & **Goals**. + +All other configuration settings are managed using global roles. For more details see our [Roles](./roles) page. + +--- + ## Team structure The first building block was to make it possible to map your internal team structure in ABsmartly so your teams can be structured just like they are in your organisation. 
Team structure -:::tip action +:::tip If you have not done so yet, we encourage you to go to /settings/teams and start building a team structure which aligns with the way you are structured and operating (or any other which makes most sense from the organizational point of view). Unless you start building your team hierarchy in ABsmartly you won't be able to start making use of the new features described below. ::: @@ -35,7 +41,7 @@ For backward compatibility, existing assets will retain their current individual While Team Ownership is the new default and preferred way of assigning ownership for Experiments, Features, Templates, Goals & Metrics, it is possible to re-enable user level ownership in the platform settings (/settings/platform). ::: -:::tip action +:::tip To get the most of the new model we encourage you to move the ownership of your existing Experiments, Features, Templates, Goals & Metrics to the most relevant team in your organisation. To do that, just edit assets that you currently own and re-assign ownership to the correct team. Make sure that you are a member of that team if you want to retain your access to that asset. ::: @@ -70,7 +76,7 @@ Team membership can be explicit when a user is directly added to a team but it c For example if a user is added explicitly as `Team Contributor` in team Development, then this user would also be an implicit member of the parent teams (Engineering). Implicit members are, by default, not assigned any particular role in that team. -:::tip action +:::tip Make sure to add all platform users to their respective team and assign them the correct role so they can interact with the team's assets. ::: @@ -117,7 +123,7 @@ For now there are 2 types of possible permissions associated with sharing of ass Following the least privilege principle, and unless there is a good reason, it is usually good practice to grant Can Edit only to relevant individuals. 
::: -:::tip action +:::tip Go over your list of Experiments, Features, Templates, Goals & Metrics that you own (or that your team owns) and make sure to share it with the relevant individual(s) or team(s). For example if you want a metric or a goal to be available for everybody to use, then share it with the Global Team with Can View permission. It is possible that you want all your existing metrics, goals and possibly all your experiments, features and templates to be discoverable and usable by anyone on the platform. diff --git a/docs/web-console-docs/_category_.json b/docs/web-console-docs/_category_.json index 8fe23c8d..41e8c79b 100644 --- a/docs/web-console-docs/_category_.json +++ b/docs/web-console-docs/_category_.json @@ -2,5 +2,5 @@ "position": 0, "collapsible": true, "collapsed": true, - "label": "Web Console Tutorial" + "label": "Product Documentation" } diff --git a/docs/web-console-docs/events.mdx b/docs/web-console-docs/events.mdx deleted file mode 100644 index 1b8efd07..00000000 --- a/docs/web-console-docs/events.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -sidebar_position: 7 ---- - -import Image from "../../src/components/Image" - -# The Events Page - -The events page is a great tool for debugging the relationship between your -code and the Web Console. To access it, select `Events` from the sidebar and -choose `All`. - -:::info -If you want to **export ALL the exposure and goal events for a single experiment**, you can do it directly from the experiment overview page by clicking on the ellipsis (…) menu and selecting **Export data**. -::: - -## The Events List - -A screenshot of the events page on the ABsmartly web console - -When opening the events page, you will be presented with a graph of your -incoming events and a list of the most recent ones. Events can be split into -two categories - `Exposures` and `Goals`. - -### Exposure Events - -Exposure events are sent by our SDKs when the `treatment` function is called. 
-The treatment function asks our platform which variant a participant should -see and returns a number between `0` and `3`. When this question has been asked, -we say that the user has been **exposed** to the experiment. - -### Goal Events - -Goal events are sent by the SDKs when the `track` function is called and they -include any properties that were passed along with it. - -## Raw Event JSON - -Clicking on a single event will bring up a dialog box with the event's raw JSON data: - -A screenshot of one event's raw JSON data - -## Event Filters - -For debugging it can be useful to filter the incoming events to narrow them -down to the specific events that you are looking for. - -The available filters are as follows: - -### Application - -Select the application(s) that you want to see events from. Applications are -created and edited in the [Dashboard Settings](/docs/web-console-docs/settings). - - -### Unit Type - -Select which unit types you want to see events from. Common unit types are `user_id` -or `anonymous_id`, but they could be anything depending on your setup. Unit -Types can be created in the [Dashboard Settings](/docs/web-console-docs/settings#units). - -### Event Type - -Filter your events to only show `exposure` events or `goal` events, as -described above. This filter can also be preselected by choosing a type when -clicking on the `Events` sidebar navigation. - -### Event Name - -The event name can be extremely useful for debugging. For exposure events, the -event name will be the name of the experiment that was exposed. For goal events, -the event name will be the name of the goal that was tracked. - -### Environment Type - -Choose whether to see events from either only Development environments or only -Production environments. Both types of environment can be set up in the -[Dashboard Settings](/docs/web-console-docs/settings#environments). - -### Unit UID - -The Unit UID is the most specific form of filtering in the events page. 
As -mentioned above, each event will be sent with a unique unit - often `user_id` -or `anonymous_id`. These values, when passed into the SDKs, are hashed and -sent to our platform in an encrypted form. If you only want to see values that -have been sent by yourself, you can copy and paste your Unit UID hash into -this box and only your own events will appear in the list. - -## The Exporter - -A screenshot of the export configurations list - -ABsmartly also has functionality for exporting your events. To do this, click -on `Events` in the sidebar and select `Exports`. - -Here, you will be presented with a list of any previously created exports. For more information on any particular -configuration, you can click on it. - -### Exporting Events - -To set up a new export, click `Export` in the top-right hand corner. - -A screenshot of the export configurations create page - -Here you will be able to set up an export configuration that runs either once, -or recurrently. - -#### Configure Export - -The first step on the exporter form is to give your configuration a name and -to select how often you want it to run. This can be once, hourly, daily, -weekly or monthly. - -#### Filter Events - -As mentioned in the [Event Filters section](#event-filters), you can filter -your events to fine tune what kind of events are exported. The `Start At` -field is required and is the date and time of the earliest event that you want -to export. For recurring exports, after the first export, the `Start At` event -will be the next event after the last one that was previously exported. - -#### Configure Storage - -The `Configure Storage` section is where you will input the details of your -storage bucket. The fields are described in the following table: - -| Field Name | Description | -| --------------- | ----------------------------------------------------------------------------- | -| **Bucket** | The name of the object storage bucket to export into. 
| -| **Prefix** | The prefix of the exported filename, which can include / to denote "sub-folders". | -| **Access Key ID** | The access key id of for the object storage bucket. | -| **Secret Key** | The secret access key for the object storage bucket. | -| **Endpoint** | The S3-compatible endpoint to connect to the object storage bucket. **Examples:**
AWS: https://s3.amazonaws.com
GCP: https://storage.googleapis.com
| -| **Format** | The format of the exported data. CSV and TSV include a header row with column names. JSON is a newline-delimited JSON file. Parquet is a very efficient column oriented storage format.| -| **Compression** | The compression method to use. GZIP is widely supported and provides good compression. ZSTD is similar to GZIP in compression level but faster, and less widely supported. LZ4 is much faster but provides slightly less compression. None is no compression. | - -:::info Interoperability with GCP to S3 -1. Go to the [Cloud Storage Settings page](http://console.cloud.google.com/storage/settings/) in the [Google Cloud Platform Console](https://console.cloud.google.com/). -2. In Settings, select **Interoperability**. -3. If interoperability has not been set up yet, click **Enable Interoperability Access**. -4. Click **Create new key**. -::: - -Once all of these fields have been filled, you must use the `Test Configuration` -button to make sure that the fields have been filled correctly. If the test is -successful, you can click the `Export Events` button to start exporting! diff --git a/docs/web-console-docs/Aborting-experiments.mdx b/docs/web-console-docs/experiments/Aborting-experiments.mdx similarity index 94% rename from docs/web-console-docs/Aborting-experiments.mdx rename to docs/web-console-docs/experiments/Aborting-experiments.mdx index 066de42f..413eaaec 100644 --- a/docs/web-console-docs/Aborting-experiments.mdx +++ b/docs/web-console-docs/experiments/Aborting-experiments.mdx @@ -2,7 +2,7 @@ sidebar_position: 10 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # When to abort an experiment? @@ -47,7 +47,7 @@ If the treatment introduces a **bug** or **technical failure** that disrupts cor A bug in the checkout flow prevents users from completing transactions, making it imperative to abort the experiment immediately. 
### Experiment Health Checks alert -[Health Checks](/docs/web-console-docs/experiment-health-checks) are quality checks run in the background to ensure the experiment and data are reliable. Most health checks alerts would require to abort the experiment and investigate the reason for the alert. +[Health Checks](Experiment-health-checks) are quality checks run in the background to ensure the experiment and data are reliable. Most health check alerts would require aborting the experiment and investigating the reason for the alert. **Example**: The health checks show an assignment conflicts was detected meaning that some visitors have been exposed to more than one variant. This might require the experiment to be stopped and the issue investigated. diff --git a/docs/web-console-docs/Experiment-health-checks.mdx b/docs/web-console-docs/experiments/Experiment-health-checks.mdx similarity index 86% rename from docs/web-console-docs/Experiment-health-checks.mdx rename to docs/web-console-docs/experiments/Experiment-health-checks.mdx index 48c3c9a1..7b93840f 100644 --- a/docs/web-console-docs/Experiment-health-checks.mdx +++ b/docs/web-console-docs/experiments/Experiment-health-checks.mdx @@ -2,14 +2,16 @@ sidebar_position: 12 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # Experiment Health Checks ## What are experiment Health Checks? -When running experiments, ensuring their reliability and validity is critical for making data-driven decisions. To achieve this, ABsmartly performs a series of automated health checks in the background. These checks are designed to identify potential issues that could compromise the integrity of the results. Below, we outline the health checks we currently perform and their importance in maintaining experiment quality. +When running experiments, ensuring their reliability and validity is critical for making data-driven decisions. 
+To achieve this, ABsmartly performs a series of automated health checks in the background. +These checks are designed to identify potential issues that could compromise the integrity of the results. Below, we outline the health checks we currently perform and their importance in maintaining experiment quality. --- ## Where can I see them? @@ -58,7 +60,8 @@ The experiment should be aborted and the reasons for SRM should be investigated. ### Audience Mismatch **What is it?** -Ensures that the actual audience for the experiment matches the intended target audience. This check is only performed when [Audience Enforced is turned off](/docs/web-console-docs/creating-an-experiment#targeting-audience). This serves as a reminder to put the required checks in your code to separate that targeting audience. +Ensures that the actual audience for the experiment matches the intended target audience. +This check is only performed when [Audience Enforced is turned off](/docs/web-console-docs/experiments/creating-an-experiment#targeting-audience). This serves as a reminder to put the required checks in your code to separate that targeting audience. **Why is it important?** It means that a non-intended audience is being exposed to your changes.Including the wrong audience can dilute the experiment’s impact and obscure meaningful insights. @@ -86,7 +89,7 @@ Assess the risk of the interaction and both one or both of the experiments to el ### Variables Conflicting **What is it?** -Checks for conflicting variables used across 2 or more experiments. This check is performed when [Experiment Variables](/docs/web-console-docs/creating-an-experiment#variant-variables) are used in the experiment setup. +Checks for conflicting variables used across 2 or more experiments. This check is performed when [Experiment Variables](creating-an-experiment#variant-variables) are used in the experiment setup. 
:::caution Variable keys that are prefixed with a double underscore (`__`) are ignored during this health check. diff --git a/docs/web-console-docs/Experiment-reports.mdx b/docs/web-console-docs/experiments/Experiment-reports.mdx similarity index 92% rename from docs/web-console-docs/Experiment-reports.mdx rename to docs/web-console-docs/experiments/Experiment-reports.mdx index e6438434..c199f7f3 100644 --- a/docs/web-console-docs/Experiment-reports.mdx +++ b/docs/web-console-docs/experiments/Experiment-reports.mdx @@ -2,7 +2,7 @@ sidebar_position: 12 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # Experiment Reports @@ -79,8 +79,8 @@ Even if an experiment was running only for a few seconds it would appear on the Experiments completed report This report highlights the number of experiment which were successfully completed in the reporting period. -In the case of a [Fixed Horizon Experiment](/docs/web-console-docs/types-of-analysis), it is deemed completed once it reaches its expected sample size or planned duration. -For a [Group Sequential Test](/docs/web-console-docs/types-of-analysis), an experiment is completed once it crosses one of the test boundaries (efficiency or futility). +In the case of a [Fixed Horizon Experiment](overview#analysis-methods), it is deemed completed once it reaches its expected sample size or planned duration. +For a [Group Sequential Test](overview#analysis-methods), an experiment is completed once it crosses one of the test boundaries (efficiency or futility). The report also visualises the outcome of the statistical test on the primary metric at the moment the experiment was completed. The experiment will be shown in `green` if its **primary metric was significant in the expected direction** at the time the experiment was completed. 
@@ -102,7 +102,7 @@ Knowing that something does not have the expected impact on customer behaviour c In the case of **early stopped experiments** (also known as aborted experiments), the decisions to abort can happen because of bugs in the implementation, early worrying negative signals, strategic reasons or for any other reason. Depending on the reason for early stopping, aborted experiments are typically restarted once the underlying issue is fixed. -Read our [When to abort an experiment?](/docs/web-console-docs/Aborting-experiments) guide to understand more about aborting experiments. +Read our [When to abort an experiment?](Aborting-experiments) guide to understand more about aborting experiments. While aborting experiments is common and part of the process, **early full on**, should be rare as they indicate that changes were pushed without supporting evidence. This can sometimes happen for strategic or legal reasons but because the experiment did not complete, it does not provide the reliable evidence needed to support or discard the underlying hypothesis. @@ -140,7 +140,7 @@ It is nonetheless possible for `Full on` decisions to not be fully supported by Currently a `Full on` decision is shown as supported by evidence **if and only if the experiment was completed and the primary metric is significant in the expected direction**. :::caution -The supported by evidence validation does not currently includes checks on the secondary and guardrail metrics nor on the experiment [health checks](/docs/web-console-docs/experiment-health-checks). +The supported by evidence validation does not currently include checks on the secondary and guardrail metrics nor on the experiment [health checks](experiment-health-checks). ::: @@ -160,7 +160,7 @@ This typically happens when the evidence does not support the hypothesis. This widget provides a view on how many `Abort` decisions were made in the reporting period. Aborting means stopping an experiment before it is completed. 
Aborting can happen when a bug is found or when early negative signals indicate a possible degradation in certain key metrics but it could also be a strategic choice to stop early. -Read our [When to abort an experiment?](/docs/web-console-docs/Aborting-experiments) guide to understand more about aborting experiments. +Read our [When to abort an experiment?](Aborting-experiments) guide to understand more about aborting experiments. Like with `Keep current` decisions, `Abort` decisons means that the current experience remains but unlike `Keep current` decisions it does not say anything about the hypothesis being tested or not. ## Decisions history diff --git a/docs/web-console-docs/Interpreting-metrics-in-experiment-results.mdx b/docs/web-console-docs/experiments/Interpreting-metrics-in-experiment-results.mdx similarity index 99% rename from docs/web-console-docs/Interpreting-metrics-in-experiment-results.mdx rename to docs/web-console-docs/experiments/Interpreting-metrics-in-experiment-results.mdx index d0162212..d002813f 100644 --- a/docs/web-console-docs/Interpreting-metrics-in-experiment-results.mdx +++ b/docs/web-console-docs/experiments/Interpreting-metrics-in-experiment-results.mdx @@ -2,7 +2,7 @@ sidebar_position: 11 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # Guide to interpreting metrics in experiment results diff --git a/docs/web-console-docs/experiments/_category_.json b/docs/web-console-docs/experiments/_category_.json new file mode 100644 index 00000000..6117b07e --- /dev/null +++ b/docs/web-console-docs/experiments/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "collapsible": true, + "collapsed": true, + "label": "Experiments" +} \ No newline at end of file diff --git a/docs/web-console-docs/creating-an-experiment.mdx b/docs/web-console-docs/experiments/creating-an-experiment.mdx similarity index 94% rename from docs/web-console-docs/creating-an-experiment.mdx rename to 
docs/web-console-docs/experiments/creating-an-experiment.mdx index 1f862b56..dee86879 100644 --- a/docs/web-console-docs/creating-an-experiment.mdx +++ b/docs/web-console-docs/experiments/creating-an-experiment.mdx @@ -1,10 +1,10 @@ --- -sidebar_position: 1 +sidebar_position: 2 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; -# Creating an experiment +# Create an experiment A step by step guide to creating a new experiment in ABsmartly. ## Getting started @@ -13,7 +13,7 @@ To get started with creating a new experiment, you can click **Experiments** on :::info Instead of creating an experiment from scratch you can also decide to use a pre-existing template. -To find out more about creating and using templates check our [template documentation](/docs/web-console-docs/templates). +To find out more about creating and using templates check our [template documentation](templates). ::: ## Basics @@ -66,7 +66,7 @@ The more variants you have, the less traffic each variant will be exposed to and ### Variant Variables Variant variables can be used to automate your experiments. If you, for example, use a configuration file, you can pass a variant variable here and have it overwrite your code. See -[treatment variables](/docs/sdk-documentation/basic-usage#treatment-variables) in the SDK Documentation for more information. +[treatment variables](/docs/APIs-and-SDKs/SDK-Documentation/basic-usage#treatment-variables) in the SDK Documentation for more information. :::caution We do not recommend changing variables on your control variant (**Variant A**). @@ -93,12 +93,12 @@ If the experiment is to run on a part of the product where visitors are not logg ABsmarty does not create nor use any third-party cookie. It is the responsibility of the experimenters to provide a unique identifier for the visitors of the experiment and if needed to store it in a first-party cookie. 
::: -You can create new tracking units (there is no limit to the number of identifier you create) in your [dashboard settings](/docs/web-console-docs/settings#units). +You can create new tracking units (there is no limit to the number of identifier you create) in your [dashboard settings](/docs/web-console-docs/Configuration/settings#units). ### Applications **Applications** defined where this experiment will run. An experiment can be running on several platforms at the same time. -These could be `android`, `ios` and `web`, for example, and are also created in your [dashboard settings](/docs/web-console-docs/settings#applications). +These could be `android`, `ios` and `web`, for example, and are also created in your [dashboard settings](/docs/web-console-docs/Configuration/settings#applications). ### Targeting Audience In this section you can define which particular audience will be exposed to the experiment. For example, you could decide to only run the experiment for visitors on an `android` device or visitors whose language is set to `english`. @@ -107,7 +107,7 @@ In this case you might set the filter group to: Experiment targeting english speakers on Android -These audience parameters can then be passed to the SDK in your code using [context attributes](docs/SDK-Documentation/Advanced/context-attributes.mdx). +These audience parameters can then be passed to the SDK in your code using [context attributes](/docs/APIs-and-SDKs/SDK-Documentation/Advanced/context-attributes.mdx). #### Audience Enforced When audience enforced is **on**, only users in the filter groups will be exposed to the experiment. Others will be shown the control variant, but @@ -119,7 +119,7 @@ ABsmartly will only warn you when visitors not matching the audience are exposed ## Metrics In this section you can define the metrics to use to test your hypothesis and to measure the impact of your experiment. 
-You can learn more about experimentation metrics in our [Understanding Experimentation Metrics](/docs/web-console-docs/understanding-experimentation-metrics) article. +You can learn more about experimentation metrics in our [Understanding Experimentation Metrics](/docs/web-console-docs/goals-and-metrics/metrics/overview) article. ### The Primary Metric The **Primary Metric** is the main metric you are trying to impact as a result of the change. This is the metric which ABsmartly uses to compute the test statistics and it is the main metric which will be used to decide if the hypothesis is tested or not. @@ -194,7 +194,7 @@ It is good practice to have a set of guardrail metrics agreed upon and shared by To improve their usefulness, it is possible to define on the metric’s page, a threshold at which an alert would be triggered so you don’t miss it. Depending on the severity of the issues they signal, **Guardrail Metrics** can be used both for early stopping of experiments and to assess their impact at decision time. -For more information on early stopping of experiments using ABsmartly you can read our [**early stopping guide**](/docs/web-console-docs/aborting-experiments). +For more information on early stopping of experiments using ABsmartly you can read our [**early stopping guide**](Aborting-experiments). ### The Exploratory Metrics @@ -222,12 +222,12 @@ Below are a few examples of what Primary, Secondary and Guardrail metrics could ## Analysis In this step, you can choose and set up the analysis type you want to use. -To choose between a Group Sequential or Fixed Horizon analysis check our [analysis type article](/docs/web-console-docs/types-of-analysis). +To choose between a Group Sequential or Fixed Horizon analysis check our [analysis type article](overview#analysis-methods). 
To continue to properly set up your experiment refer to the relevant setup guide -- [**Group Sequential setup guide**](/docs/web-console-docs/setting-up-a-gst-experiment) -- [**Fixed Horizon setup guide**](/docs/web-console-docs/setting-up-a-fixed-horizon-experiment) +- [**Group Sequential setup guide**](setting-up-a-gst-experiment) +- [**Fixed Horizon setup guide**](setting-up-a-fixed-horizon-experiment) ## Metadata @@ -235,7 +235,7 @@ The metadata step is where you can fill in details about your experiment that ma ### Metadata -The metadata section allows you to select the owners of this experiment, assign it to a particular team and add tags to help with searching and filtering later on. These fields may be required, optional or hidden depending on your company's [platform settings](/docs/web-console-docs/settings#platform-settings). +The metadata section allows you to select the owners of this experiment, assign it to a particular team and add tags to help with searching and filtering later on. These fields may be required, optional or hidden depending on your company's [platform settings](/docs/web-console-docs/Configuration/settings#platform-settings). #### Tags @@ -250,7 +250,7 @@ These allow for you to filter your experiments in the experiments list page, but ### Description -When creating a new experiment, the description section acts as your contract for the test. Allowing you to define for yourself and your team why you are running this experiment, what you hope for the result to be and what will be done after any of the experiment's possible outcomes. The following are our default fields, but they may be different depending on your company's [platform settings](/docs/web-console-docs/settings#platform-settings). +When creating a new experiment, the description section acts as your contract for the test. 
Allowing you to define for yourself and your team why you are running this experiment, what you hope for the result to be and what will be done after any of the experiment's possible outcomes. The following are our default fields, but they may be different depending on your company's [platform settings](/docs/web-console-docs/Configuration/settings#platform-settings). #### Hypothesis diff --git a/docs/web-console-docs/experiments/overview.mdx b/docs/web-console-docs/experiments/overview.mdx new file mode 100644 index 00000000..9f6943c1 --- /dev/null +++ b/docs/web-console-docs/experiments/overview.mdx @@ -0,0 +1,391 @@ +--- +sidebar_position: 1 +--- + +import Image from "../../../src/components/Image"; + +# Overview + +## What is an experiment + +An experiment evaluates the impact of a product change by comparing the behaviour of users who see the change with those who do not. +ABsmartly makes this possible by assigning users to variants, tracking their actions with goal events, +and analysing the difference between variants with reliable statistics. + +An experiment contains several key elements: + +- **Exposure** +How users enter the experiment and how they are assigned to variants. + +- **Variants** +Different experiences shown to users. + +- **Goals and Metrics** +Events and measurements used to evaluate the impact. + +- **Monitoring** +Automatic checks that help ensure the experiment is healthy and safe to run. + +- **Results and Decisions** +The analysis of variant impact and the decision you take once the data is clear. + +ABsmartly handles all randomisation, data collection, metric computation, and statistical inference so you can focus on learning from your product changes. + +### Exposure and assignment + +When a user reaches an experiment, ABsmartly assigns them to a variant using a deterministic hashing method. 
This ensures: +- stable assignment +- no cross contamination across variants +- consistent behaviour during the entire experiment +- predictable control of traffic allocation + +[Exposure events](../Events/exposure-events) are sent automatically by the SDK, and these events define when users become part of the analysis dataset. + +### Variants + +Experiments usually include a control variant and one or more treatment variants. Each variant represents a specific user experience. +ABsmartly allows you to configure: +- variant names +- traffic allocation +- rollout rules +- targeting rules +- experiment overrides for testing or QA + +Variants determine what users see, while the metrics determine how those differences are evaluated. + +### Goals and metrics + +Experiments are measured using [goals](../goals-and-metrics/goals/overview) and the [metrics](../goals-and-metrics/metrics/overview) derived from them. + +Examples of goals: + +```javascript +context.track("purchase", { price: 1000 }); +context.track("add_to_cart", { product_id: "ABC123" }); +context.track("view_item", { item_id: "XYZ987" }); +``` + +Metrics take your [goal events](../Events/goal-events) and turn them into meaningful measurements of user behaviour. +They let you answer questions like: + +- how many times something happened +- how many users performed an action +- how much value was generated +- how behaviour changes between variants + +ABsmartly handles all the computation and presents the results in a clear, comparable way across variants. 
+ +### Guardrails and monitoring + +Before experiment results can be trusted, ABsmartly performs several [health checks](Experiment-health-checks) automatically: +- Sample Ratio Mismatch at the experiment level +- assignment and exposure conflicts +- guardrail metric thresholds +- unexpected behaviour in exposure or goal events +- data quality anomalies + +Guardrails help detect harmful side effects and make it possible to [abort experiments](Aborting-experiments) if needed. + +You can also configure guardrail metrics with [thresholds](../goals-and-metrics/metrics/create#metric-threshold-alert) that alert you when impact crosses meaningful limits, +for example when a performance metric becomes slower or when a business KPI shows a potential drop. + +### Analysing results + +ABsmartly uses sound statistical methods to compute the impact of each variant compared to control. +For each metric you will see: +- visitor count +- total value +- mean value per user +- relative impact +- confidence interval +- impact per day + +You can use our guide on [interpreting metrics results](interpreting-metrics-in-experiment-results) to help with the analysis step. + +### Decisions + +Once results are stable and clear, you can record a decision: + +**Full on** +The treatment performs well enough to roll out fully. + +**Keep Current** +Keep the current implementation and either **iterate** or **abandon**. + +**Abort** +The experiment produced a harmful effect or a technical issue made results invalid. + +Decisions are tracked across the platform to support transparency, accountability, and long-term learning. + +--- + +## Analysis methods + +ABsmartly supports two types of statistical analysis: fixed horizon and group sequential. +Both methods compare variant performance, but they differ in how and when you can look at the results. 
+Understanding the differences between these methods and knowing when to use each can significantly impact the efficiency and accuracy of your experimentation program. + +### Fixed horizon + +Fixed Horizon Testing involves analyzing the results of an experiment after reaching a predefined sample size (number of unique visitors) or +reaching a specific duration. This method, supported by most AB Testing tools, assumes that the sample size is defined before the experiment starts and +remains unchanged throughout the runtime of the experiment. + +While this method is widely used and beneficial, it lacks flexibility, as decisions can only be made at a single predefined moment. +This limitation can lead to unreliable decisions (when experimenters make decisions too early) as well as wasted time and resources. +This makes the use of Fixed Horizon testing for product experimentation, where trust, speed, and agility are crucial, less beneficial and more challenging. +This is especially true for teams with less experience. + +Fixed horizon uses a 2-sided test, meaning it evaluates whether the observed effect is significantly in either direction (positive or negative). +Results in a 2 sided-test can be significantly positive, significantly negative or insignificant. + +### Group sequential + +[Group Sequential Testing](https://absmartly.com/gst) is an adaptive analysis method that allows for interim analyses at various points during the experiment. +At ABsmartly you can decide how often or how many interim analyses you want. +A Group Sequential approach provides the flexibility to stop the experiments early for efficacy or for futility. + +Setting up a Group Sequential Test + +While adding more interim analysis will slightly reduce statistical power compared to fixed-horizon testing, overall it greatly speeds up decision-making, as significance is commonly reached before the full sample is collected. 
This efficiency gained from using Group Sequential Testing is making a real difference to ABsmartly customers, to the pace at which decisions can be made. + +A Group Sequential Test result + +Unlike Fixed Horizon, Group Sequential Testing uses a 1-sided test, meaning it evaluates whether the observed effect is significant only in the expected direction. Results in a 1-sided test can either be significant in the expected direction or insignificant. + +:::info +Different experimentation platforms might use different sequential testing implementation. +The most commonly used sequential method is Fully Sequential and while it offers the most flexibility (decisions can be made at any moment in time), +it comes at the cost of much lower power which in turn leads to higher time to decision. +At ABsmartly we believe Group Sequential Testing provides the right compromise between flexibility and speed which is required to make high-quality +product decisions in a fast-moving business context. +::: + +### How to choose? + +Both methods ensure reliable results, but group sequential analysis provides more flexibility, while fixed horizon follows a more traditional “run to completion” approach. +Most of the time, Group Sequential Testing should be the preferred method (who does not want faster trustworthy results?), but there are a few use cases where you might decide to use a Fixed Horizon setup. This is mainly when you are dealing with a strong novelty effect (Group Sequential Testing might come to a premature conclusion which might not reflect the true impact) or where you have a long action cycle and wish to observe the visitors for a pre-defined period of time. + +Because it is a 2-sided test, Fixed Horizon is a better choice if differentiating between inconclusive and significantly negative results is important. + +--- + +## Server-side vs client-side experiments + +ABsmartly supports both server-side and client-side experimentation seamlessly. 
+Each approach has different strengths depending on where the change is implemented and how data flows through your system. + +### Server-side experiments + +Server-side experiments are implemented inside your backend or API layer. The variant assignment happens before the response is sent to the client. + +**How it works** +Your backend receives the experiment key, retrieves the assigned variant from the SDK, and returns different data or logic depending on the variant. + +**Use cases** +- Pricing logic +- Ranking and recommendation algorithms +- API responses +- Feature toggles +- Checkout and order processing +- Any business logic that must run before rendering + +**Benefits** +- High performance and stable assignment +- No flicker or visible changes after load +- Works regardless of device or browser +- Ideal for experiments that influence logic rather than UI +- Better security and protection against manipulation +- Can be shipped immediately without any code change + +**Things to consider** +- Requires backend engineering work +- QA may take longer +- Changes often require deployments + +### Client-side experiments + +Client-side experiments run inside the user’s browser. The SDK assigns a variant, and client-side code applies the appropriate UI changes. + +**How it works** +The frontend calls `context.track`. The SDK returns the assigned variant, and the UI is updated accordingly. 
+ +**Use cases** +- Simple Layout and visual changes +- Styling and copy updates +- Interaction tweaks +- Landing page optimisation +- Experiments that do not require backend involvement + +**Benefits** +- Very fast iteration +- No backend deployment needed +- Ideal for design and UX teams +- Non engineers to create visually in the [Visual Editor](../launchpad-browser-extension/getting-started) + +**Things to consider** +- Must be implemented correctly to avoid flicker +- Not suitable for logic or sensitive values such as pricing +- Some browsers block scripts which can impact exposure if not configured well +- Network timing can affect when changes appear +- Must be re-implemented fully in code when shipped and before the experiment can be cleaned-up + +### How to choose + +Choose **server side** when: +- the change affects APIs, data or business logic +- correctness and performance are important +- full consistency across the user journey is required + +Choose **client-side** when: +- the change is purely visual and simple +- you want fast trial and error +- backend changes are not possible or needed + +Many teams use both approaches together. ABsmartly fully supports hybrid experimentation. + +--- + +## Experiment lifecycle + +Experiments move through several stages from creation to completion. Each stage reflects where the experiment is in its setup, execution, or decision flow. + +### Draft +The experiment has been created but is **not yet ready to run**. +At this stage you typically: +- define the hypothesis +- configure variants +- choose goals and metrics +- set targeting and traffic allocation + +Nothing is live, and the experiment is not visible to end users. + +### Ready +The experiment is fully configured and reviewed. +It can be started in development or production at any time once the team is ready. +No traffic is assigned yet. + +### Development +The experiment is running in dev mode. 
+During this phase: +- developers add the experiment key and variant logic +- teams QA the experience +- exposure events may appear only from testing environments + +The experiment is not yet running for real users. + +### Running +The experiment is live, and real users are being assigned to variants. + +During this stage: +- traffic is split according to the configured allocation +- metrics begin accumulating +- guardrails and monitoring are active +- you can review interim results depending on the chosen analysis method (fixed horizon or group sequential) +- experiment setup can no longer be edited (some metada can still be edited) + +This continues until the experiment is stopped, either when it reaches significance or when it is aborted. + +### Stopped +The experiment has been manually or automatically stopped. + +Common reasons include: +- reaching the predefined horizon +- detecting a harmful effect +- encountering operational issues +- pausing development or product rollout + +Traffic is no longer entering the experiment. + +### Full on +A final decision has been made to fully release the treatment. + +In this stage: +- the winning variant is rolled out to one hundred percent of traffic +- the experiment is locked for editing +- results remain accessible for reference +- the decision is tracked in the decision history + +This marks the successful completion of the experiment. + +### Archived +The experiment is closed and archived. + +- the code has been cleaned-up of experiment code +- it will not appear in active experiment lists +- results and metadata remain stored for historical context and learning + +Archiving keeps your workspace clean while preserving your experimentation history. + +--- + +## Ownership & permissions + +`Experiments` are Managed-Assets and, as such, follow a specific [ownership model](/docs/web-console-docs/users-teams-permissions/ownership-and-permissions). 
+ +### Ownership + +An experiment can be owned by 1 or more teams and, if the feature was enabled for your organisation, individual users. + +:::info +Team ownership is generally a better fit for governance because it creates stability, resilience, and accountability at the right level. + +A team persists even when individuals change roles, leave, or shift priorities, so the metric keeps a reliable steward over time. +Expertise is usually distributed across a group rather than held by one person, which reduces risks from single-point knowledge and avoids bottlenecks. +Team ownership is better suited to review changes, ensure consistency, and maintain quality. +::: + +### Permissions + +The following permissions exist when managing and working with `experiments`. + +## Experiment permissions + +| Permission | Description | +|-----------|-------------| +| **Admin experiments** | Grants full administrative control over all experiments, including permissions, visibility, and configuration settings across the workspace or team. | +| **Archive an experiment** | Allows archiving an experiment. Archiving removes it from active lists while preserving all results and metadata. | +| **Comment on an experiment** | Permits adding comments to an experiment for collaboration, reviews, or decision discussions. | +| **Create an experiment** | Allows creating a new experiment from scratch or from a template. | +| **Create an experiment from template** | Permits creating a new experiment specifically using an existing experiment template. | +| **Start an experiment in development** | Allows setting the experiment status to *Development*, enabling implementation and QA. | +| **Edit an experiment** | Grants permission to modify an experiment’s configuration before it is running (variants, goals, metrics, targeting, etc.). | +| **Full-on an experiment** | Allows marking the experiment as *Full on*, indicating the winning variant is rolled out to all traffic. 
| +| **Get an experiment** | Allows viewing a specific experiment and its metrics. | +| **List experiments** | Grants access to view the list of all experiments in the workspace or team. | +| **Restart an experiment** | Allows restarting an experiment after it has been stopped, sending new traffic into the experiment. | +| **Start an experiment** | Allows changing an experiment’s status to *Running*, making it live for real users. | +| **Stop an experiment** | Allows stopping a running experiment. New users will no longer enter the experiment. | +| **Unarchive an experiment** | Allows restoring a previously archived experiment so it becomes active again. | + +#### Global access + +Permission to create and manage `experiments` can be granted to the relevant users through their [role](../configuration/settings#roles) at the platform level. + +:::info +It is not recommended to provide access to `metrics` to non platform admin users at the platform level. +::: + +#### built-in team level roles + +Permission to create and manage `experiments` can be provided to the relevant users at the team level by granting them the correct role in that team. + +| Permission | Description | +|-------------------------|--------------| +| **Team Admin** | Grants full control over experiments owned by that team. | +| **Team Contributor** | Grant ability to create, start, stop and to manage experiments in the team scope. | +| **Team Viewer** | Grant ability to view and list experiments owned by the team. | + +:::info +Team roles are inherited, so if a user is a `Team Contributor` in a team, then this user would also be a `Team Contributor` in all child teams. +::: + +### Sharing experiments + +While `experiments` are owned by teams, they can be shared with other teams and individual across the organisation. + +| Permission | Description | +|-------------|--------------| +| **can_view** | Grants this user or team the ability to view this experiment. 
| **can_edit** | Grants this user or team the ability to edit, start, and stop this experiment. |
+This graph shows the metric's variance and standard deviation over the last 6 weeks for your selected platform(s) and tracking unit +(or performance reason, the underlying query samples 10% of overall visitors to compute the results). This is an indication of what you can expect if you start your experiment now with this metric as your primary metric. diff --git a/docs/web-console-docs/setting-up-a-gst-experiment.mdx b/docs/web-console-docs/experiments/setting-up-a-gst-experiment.mdx similarity index 98% rename from docs/web-console-docs/setting-up-a-gst-experiment.mdx rename to docs/web-console-docs/experiments/setting-up-a-gst-experiment.mdx index 75e36c7e..d306c934 100644 --- a/docs/web-console-docs/setting-up-a-gst-experiment.mdx +++ b/docs/web-console-docs/experiments/setting-up-a-gst-experiment.mdx @@ -2,8 +2,8 @@ sidebar_position: 3 --- -import Image from "../../src/components/Image"; -import FutureDate from "../../src/components/FutureDate"; +import Image from "../../../src/components/Image"; +import FutureDate from "../../../src/components/FutureDate"; # Setting up a Group Sequential Experiment @@ -12,7 +12,7 @@ A step by step guide to set up and configure a Group Sequential experiment. ## Type of Analysis A Group Sequential test can be configured on the Analysis step (Step 5) of the experimentation setup. -To understand the differences between Group Sequential and Fixed Horizon experiment check our [analysis type article](/docs/web-console-docs/types-of-analysis). +To understand the differences between Group Sequential and Fixed Horizon experiment check our [analysis type article](overview#analysis-methods). 
Choosing an analysis type diff --git a/docs/web-console-docs/templates.mdx b/docs/web-console-docs/experiments/templates.mdx similarity index 98% rename from docs/web-console-docs/templates.mdx rename to docs/web-console-docs/experiments/templates.mdx index dad2e433..58baf996 100644 --- a/docs/web-console-docs/templates.mdx +++ b/docs/web-console-docs/experiments/templates.mdx @@ -2,11 +2,11 @@ sidebar_position: 8 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; -# Creating and Using Templates +# Experiment Templates -## Introduction +## Overview **Templates** are designed to streamline the process of creating experiments and feature flags. By using templates, you can quickly set up experiments with pre-configured settings, avoiding the need to start from scratch each time. This not only speeds up the process but also ensures consistency and quality across all experiments and feature flags. ## Benefits of using templates diff --git a/docs/web-console-docs/feature-flags/_category_.json b/docs/web-console-docs/feature-flags/_category_.json new file mode 100644 index 00000000..fe84395d --- /dev/null +++ b/docs/web-console-docs/feature-flags/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "collapsible": true, + "collapsed": true, + "label": "Feature Flags" +} \ No newline at end of file diff --git a/docs/web-console-docs/creating-a-feature.mdx b/docs/web-console-docs/feature-flags/creating-a-feature.mdx similarity index 74% rename from docs/web-console-docs/creating-a-feature.mdx rename to docs/web-console-docs/feature-flags/creating-a-feature.mdx index 8b8641fa..8d737555 100644 --- a/docs/web-console-docs/creating-a-feature.mdx +++ b/docs/web-console-docs/feature-flags/creating-a-feature.mdx @@ -2,8 +2,8 @@ sidebar_position: 5 --- -import Image from "../../src/components/Image"; -import FutureDate from "../../src/components/FutureDate"; +import Image from "../../../src/components/Image"; +import FutureDate 
from "../../../src/components/FutureDate"; # Creating A Feature Flag @@ -25,24 +25,22 @@ dashboard. The Variants step of the Feature Flag creation form. Feature flags can have up to four variants. Each variant can be given a name, -have screenshots added to it and can be assigned some [variant variables](/docs/SDK-Documentation/basic-usage#treatment-variables). +have screenshots added to it and can be assigned some [variant variables](/docs/APIs-and-SDKs/SDK-Documentation/basic-usage#treatment-variables). ## Audiences The Audiences step of the Feature Flag creation form. The audiences step allows you to choose who will see your feature flag. You -choose your [unit type](/docs/web-console-docs/creating-an-experiment#tracking-unit), -and [applications](/docs/web-console-docs/creating-an-experiment#applications) -and can then select a [targeting audience](/docs/web-console-docs/creating-an-experiment#targeting-audience) -to refine the flag to a specific group of users. +choose your [unit type](/docs/web-console-docs/configuration/units), +and [applications](/docs/web-console-docs/configuration/applications) +and can then select a targeting audience to refine the flag to a specific group of users. ## Metrics The Metrics step of the Feature Flag creation form. -In the [metrics](/docs/web-console-docs/creating-an-experiment#metrics) step, -you can choose which metrics you want to track for the duration of your +In this step, you can choose which metrics you want to track for the duration of your feature flag. Unlike experiments though, feature flag primary metrics are not required. @@ -51,7 +49,7 @@ required. The Metadata step of the Feature Flag creation form. Here, you can add any additional metadata to your feature flag. 
This step can -be customised in [the Platform Settings](/docs/web-console-docs/settings#platform-settings) +be customised in [the Platform Settings](/docs/web-console-docs/configuration/settings#platform-settings) to include any other fields you may need. ## Review diff --git a/docs/web-console-docs/goals-and-metrics/_category_.json b/docs/web-console-docs/goals-and-metrics/_category_.json new file mode 100644 index 00000000..f3e9b399 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 3, + "collapsible": true, + "collapsed": true, + "label": "Goals & Metrics" +} \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/goals/_category_.json b/docs/web-console-docs/goals-and-metrics/goals/_category_.json new file mode 100644 index 00000000..f35f6bb6 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/goals/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true, + "label": "Goals" +} \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/goals/create.mdx b/docs/web-console-docs/goals-and-metrics/goals/create.mdx new file mode 100644 index 00000000..18ea3431 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/goals/create.mdx @@ -0,0 +1,42 @@ +--- +sidebar_position: 2 +--- + +import Image from "../../../../src/components/Image/" + +# Create a new Goal + +## Manual + +Goals can be created in the ABsmartly's console by navigating to `Settings` and then clicking on `Goals`. + +Creating a new goal + +| Field | Description | +|-------|--------------| +| **Goal Name** | The unique goal name. This is the name which needs to be used in when calling `track` | +| **Description**| Describes what this goal tracks. | +| **Owners**| The list of goal owners. Owners can be Teams or individuals. | +| **Tags**| Tags used to better describe the goal and improve discoverability. 
| + +:::tip +Tags are useful for searching, filtering and classifying your goals. +We recommend prefixing each tag with the tag's type. For example, location:Header, stack:Backend or psychological:Trust. +::: + +## Auto-create + +Goals can be created automatically when a [goal event](/docs/web-console-docs/events/goal-events) with a new goal name is processed by ABsmartly. + +:::info +The feature must be enabled for your goals to be auto-created. When in doubt, reach out to ABsmartly Support. +::: + +:::caution +Auto-created goals do not have an owner. For governance reasons, make sure to claim ownership of a goal before you start using it. +::: \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/goals/overview.mdx b/docs/web-console-docs/goals-and-metrics/goals/overview.mdx new file mode 100644 index 00000000..22ef516f --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/goals/overview.mdx @@ -0,0 +1,100 @@ +--- +sidebar_position: 1 +--- + +# Overview + +## Goals + +`Goals` capture the fundamental events that occur during an experiment. +They represent actions that are meaningful to the product or business — such as viewing a page, clicking a button, adding an item to a cart, or completing a purchase. +Each goal is defined by a name and any associated contextual data (for example, `product ID`, `price`, or `category`). +Goals are the building blocks of measurement: they define what to observe in the experiment. +Goals are the raw data used to compute [Metrics](../metrics/overview). + +## Goal Events + +`Goals` are registered ABsmartly when the `track` function is called from the SDK. + +```javascript +// Triggered when a visitor buys a product +context.track("purchase", { + price: 1000, + currency: 'euro', + order_number: '0000982532', + product_id: "ABC123", + category_id: 'ZYZ123' +}); +``` + +In the example above, a `goal` with the name **purchase** was triggered. 
This goal has additional property information which can be used to compute the metric. +To learn more about `events` you can read the dedicated [Goal Events page](../../events/goal-events) in the knowledge base. + + +## Ownership & permissions + +`Goals` are Managed-Assets and, as such, follow a specific [ownership model](/docs/web-console-docs/users-teams-permissions/ownership-and-permissions). + +### Ownership + +A goal can be owned by 1 or more teams and, if the feature was enabled for your organisation, individual users. + +:::info +Team ownership is generally a better fit for governance because it creates stability, resilience, and accountability at the right level. + +A team persists even when individuals change roles, leave, or shift priorities, so the goal keeps a reliable steward over time. +Expertise is usually distributed across a group rather than held by one person, which reduces risks from single-point knowledge and avoids bottlenecks. +Team ownership is better suited to review changes, ensure consistency, and maintain quality. +::: + +:::info +Auto-created `goals` do not have an owner when created. +For governance reasons, make sure to claim ownership of a goal before you start using it. +::: + +### Permissions + +The following permissions exist when managing and working with `goals`. + +| Permission | Description | +|-------------------------|----------------------------| +| **Admin goals** | Grants full administrative control over goals, including managing permissions, visibility, and configuration settings for all goals within the workspace or team. | +| **Archive a goal** | Allows archiving a goal that is no longer in use, removing it from active lists while keeping its history for reference. | +| **Create a goal** | Enables the creation of new goals by defining event names, parameters, and associated metadata. | +| **Edit a goal** | Allows modification of existing goal definitions, such as updating the event name, description, or metadata. 
| +| **Get a goal** | Permits viewing the details of a specific goal, including its configuration and usage across experiments or metrics. | +| **List goals** | Grants access to view the list of all available goals within the workspace or team. | +| **Unarchive a goal** | Allows restoring a previously archived goal, making it active and available for use again. | + + +#### Global access + +Permission to create and manage `goals` can be granted to the relevant users through their [role](../../configuration/settings#roles) at the platform level. + +:::info +It is not recommended to provide access to `goals` to non platform admin at the platform level. +::: + +#### built-in team level roles + +Permission to create and manage `goals` can be provided to the relevant users at the team level by granting them the correct role in that team. + +| Permission | Description | +|-------------------------|--------------| +| **Team Admin** | Grants full control over goals owned by that team. | +| **Team Contributor** | Grants ability to create and to manage goals in the team scope. | +| **Team Viewer** | Grants ability to view and list goals owned by the team. | + +:::info +Team roles are inherited, so if a user is a `Team Contributor` in a team then this user would also be a `Team Contributor` in all child teams. +::: + +### Sharing goals + +While `goals` are owned by teams, they can be shared with other teams and individual across the organisation. + +| Permission | Description | +|-------------|--------------| +| **can_view** | Grants this user or team the ability to view and use this goal. | +| **can_edit** | Grants this user or team the ability to contribute to this goal. 
| + diff --git a/docs/web-console-docs/goals-and-metrics/metrics/_category_.json b/docs/web-console-docs/goals-and-metrics/metrics/_category_.json new file mode 100644 index 00000000..343950dd --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "collapsible": true, + "collapsed": true, + "label": "Metrics" +} \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/categories.mdx b/docs/web-console-docs/goals-and-metrics/metrics/categories.mdx new file mode 100644 index 00000000..cd8c665a --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/categories.mdx @@ -0,0 +1,18 @@ +--- +sidebar_position: 3 +--- + +# Metrics Categories + +## Overview + +Metric category is a label that groups related metrics, so teams can find, compare, and report on them consistently across experiments. +It helps users understand the intent of a metric at a glance, standardizes dashboards and filters, and improves governance by keeping similar measures together. + +Use a category to describe what a metric is about, it can represent direct business outcomes, engagement signals, or technical performance indicators. + +Frequently used categories include `conversion`, `engagement`, `retention`, `revenue`, etc but feel free to create and manage categories that best suit your needs. + +## Create & Manage + +You can create and manage Metrics Categories by going to `Settings` and clicking on `Metric Categories`. 
\ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/create.mdx b/docs/web-console-docs/goals-and-metrics/metrics/create.mdx new file mode 100644 index 00000000..4fff5c13 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/create.mdx @@ -0,0 +1,460 @@ +--- +sidebar_position: 2 +--- + +import Image from "../../../../src/components/Image"; + + +# Create a new Metric + +To create a new metric, navigate to the Metric's catalog and click on `Create metric` + +## Global Settings + +| Field | Description | +|-------|--------------| +| **Metric name** | This name of the metric must be unique across the entire platform. | +| **Owners**| The list of team and individual owners for this metric. | + +:::info +Metric's global settings are shared across all versions of a metric. +Future changes to those fields will apply to all existing and future versions of a metric. +::: + +## Details & Metadata + +The Metric details and Metadata sections provide some advanced information about the metric. +This information is version-specific and, as such, can differ between different versions of the same metric. + +| Field | Description | +|-------|--------------| +| **Description** | Providing a good description is important as it helps experiments choose the right metric.| +| **Impact**| Directionality of the metric. `positive` means that increasing this metric is good. `negative` means that decreasing the metric is good. Use `unknown` if the metric has no preferred direction.| +| **Metric type**| Describes how the metric is implemented. Choose between [`count`](metric-types/goal-count), [`goal unique count`](metric-types/goal-unique-count), [`ratio`](metric-types/ratio), [`property`](metric-types/property), [`retention`](metric-types/retention), [`time to achievement`](metric-types/time-to-achievement) and [`unique property count`](metric-types/unique-property-count). 
| +| **Tags** (optional)| Makes the metric more discoverable by providing relevant [tags](/docs/web-console-docs/configuration/settings#tags).| +| **Category** (optional)| The [category](categories) the metric belongs to (`conversion`, `performance`, etc). While optional, selecting a category makes it easier for experimenters to find relevant metrics. | +| **Applications** (optional)| Select the [applications](../../configuration/applications) where this metric makes sense. Only experiments running on these applications should track this metric. While optional, setting the correct `Application(s)` helps experimenters find relevant metrics for their experiments. | +| **Units** (optional)| Select the [tracking units](../../configuration/units) for which this metric is computed. The tracking unit of the metric should match the tracking unit of the experiments where it is tracked. While optional, setting the correct `Tracking Unit(s)` will help experimenters track relevant metrics in their experiments. | + +## Goal + +In this section, you can select and configure the [goal](../goals/overview) which provides the raw data for this metric. + +:::info +This part of a metric’s setup is version-controlled. +To edit those fields, a new version of the metric will need to be created. +::: + +```javascript +// Triggered when a visitor buys a product +context.track("purchase", { + price: 1000, + currency: 'euro', + order_number: '0000982532', + product_id: "ABC123", + category_id: 'ZYZ123' +}); +``` + +### Goal + +Select the [goal](../goals/overview) to use as a source for this metric. +In the example above, the name of the goal is `purchase`. + +If you don't see a goal in the list, make sure it exists and make sure you have the correct permissions to view and use it. + +:::info +Auto-created goals do not have an owner when created. For governance reasons, make sure to claim ownership of a goal before you start using it for your metric. 
+::: + +### Property + +In the case of [`Property`](metric-types/property) or [`Unique Property Count`](metric-types/unique-property-count) metrics, +you also need to choose which property of the goal event to use. +For example, a `revenue` metric built using the goal event above, would use the event property **price**. + +### Property filters + +Property filters let you include only the goal events whose properties match specific conditions. +These filters apply to the properties inside the event payload. +With the example above, we might want to create a `conversion` metric but only for products of a certain category, +in that case a **category_id** property filter can be used to filter only the relevant **purchase** events. + +From that event, we could also create a `revenue (euro)` metric by filtering events by **currency**. + +### Time filters + +Every visitor becomes exposed to an experiment at a specific moment. We call it the **first-seen** event. +That timestamp is the reference point for all time-based filters. + +A time filter lets you include only the goal events that happen within a defined window after that first exposure. +This ensures your metric measures behavior in a controlled, meaningful period (for example, `purchases within the first hour` or `engagement in the first 7 days`). + +### Outliers + +Outlier limits help control the influence of extreme metric values. +They reduce noise and variance by **capping values that sit outside a defined range to the value of the boundary**. + +You can choose one of four methods to manage outliers: + +:::caution +Outlier limits can reduce variance and make metrics more stable, but they also change the underlying data. +When used incorrectly, they may hide meaningful effects or introduce bias in experiment results. +Always ensure your limits reflect real-world constraints rather than engineering convenience, and avoid tuning them in response to specific experiment outcomes. 
+::: + +#### Unlimited + +This is the default. This option applies no outlier treatment. All values are kept exactly as they appear in the event data. +Use this option when you want full fidelity of raw data and are not concerned about extreme values skewing results. + +#### Quantile + +This method filters values based on chosen quantiles. +You define a lower quantile and an upper quantile (for example, 0.05 and 0.95). +Values below the lower quantile or above the upper quantile are capped to the limit. + +How it behaves with the sample purchase event: +``` +If you set: +- Lower quantile: `0.05` +- Upper quantile: `0.95` + +Then: +- If `1000` falls between the 5th and 95th percentile of all purchase prices, it is kept as it is. +- If `1000` is higher than the 95th percentile (for example if most prices are between 10 and 300), +it is capped to the upper quantile boundary's value. +``` + +Quantile filtering is useful when your data contains long tails or rare extreme highs. + +#### Standard Deviation + +This method caps values based on the mean and standard deviation of all observed values. +You enter multipliers that determine how far from the mean the lower and upper limits should be. + +The limits are calculated as: + +``` +- Lower limit: mean − (stdev × lower multiplier) +- Upper limit: mean + (stdev × upper multiplier) +``` + +Any value outside these limits is capped to the value of the boundary. + +Example with the sample purchase event: +Suppose across all purchases: + +``` +- mean price = 200 +- standard deviation = 150 + +If you set: +- Lower multiplier = 1 +- Upper multiplier = 2 + +Then: +- Lower limit = 200 − (150 × 1) = 50 +- Upper limit = 200 + (150 × 2) = 500 + +Our event's price: +- 1000 > 500 → it is capped to 500. 
+ +Setting multipliers to 0 effectively disables trimming on that side: +- Lower multiplier 0 → lower limit = mean → keeps all values below the mean +- Upper multiplier 0 → upper limit = mean → keeps all values above the mean + +Setting both to 0 is the equivalent of setting no outlier limit. +``` + +Standard deviation limits are helpful when extreme values are several standard deviations from the average. + +#### Fixed + +This option lets you define explicit numeric boundaries. Any value below the lower limit or above the upper limit **is capped to the value of the boundary**. + +Example with the sample purchase event: + +``` +- Lower limit value: −1000 +- Upper limit value: 1000 + +Then: +- price = 1000 → exactly at the upper limit → kept + +If you change to: +- Lower limit value: 0 +- Upper limit value: 500 + +Then: +- price = 1000 → above 500 → capped to 500 +``` + +Fixed limits are useful when you already know the acceptable bounds for your metric (e.g., price can never be negative or above a business-defined maximum). + +### Goal Relations + +Goal relations let you connect different goal events that represent follow-up actions on the same underlying item. +They are useful when events such as `cancellations`, `refunds`, or `replacements` should modify the way a metric is calculated. + +For example, if a purchase is later refunded, you may want the refund event to reduce the revenue metric. +Or if a purchased item is replaced with a different product, you may want to adjust the associated value. + +Goal relations define how one goal (the current one) is linked to another goal (the foreign one) using a shared identifier. +This identifier is provided through a property in both events (for example **order_number**, **purchase_id**, or **transaction_id**). + +Each relation type has its own behaviour and additional fields. + +#### Cancellation + +A cancellation relation ties a “current” goal to a cancellation event belonging to the same item. 
+This is useful when the current goal should be counted only if it has not been cancelled. +This makes it possible to create metrics such as `Net Conversions` which only accounts for purchases which were not cancelled, +giving experimenters a more accurate view of the actual business impact of their changes. + +Goal Relations - Cancellation + +Below is a sample `cancellation` events cancelling the purchase event made above. + +```javascript +// Triggered when a visitor cancels a purchase +context.track("purchase_cancelled", { + order_number: '0000982532' +}); +``` +To configure a cancellation relation, you need to provide: + +- Current goal key property +This is the property on the current goal event that identifies the item. +Example for a **purchase** event: **order_number**. +- Foreign goal +This is the goal event that represents the cancellation. +Example: **purchase_cancelled**. +- Foreign goal key property +This is the property on the cancellation event that refers to the same item. +Example: also **order_number**. + +When both events share the same key value, the cancellation will be applied to the current goal according to the metric’s logic +(for example, removing the contribution of that purchase). + +#### Refund + +A refund relation is used when a purchase can generate one or more refund events that should adjust the metric. + +Refund relations have additional configuration, as refund events may include both an identifier and a refund amount. + +Goal Relations - Refund + + +```javascript +// Triggered when a visitor gets refunded +context.track("refund", { + order_number: '0000982532', + refund_amount: 1000, +}); +``` + +To configure a refund relation, you define: + +- Current goal key property +The identifier on the current goal (**order_number**). +- Refund operation +How the refund should affect the metric (for example, subtracting refunded value, replacing value, or another supported operation). 
+- Foreign goal +The goal event representing the refund (**refund**). +- Foreign goal key property +Identifier on the refund event used to match the purchase. +- Foreign goal value property +The numeric value to use for the refund adjustment, such as **refund_amount**. +- Duplicated foreign goal aggregation +Determines how to combine multiple refund events referring to the same item (sum, max, last, etc.). + +This relation ensures that metrics involving values such as `revenue` reflect the impact of refunds accurately providing a more accurate view on the impact of the change. + +#### Replacement + +A replacement relation applies when the original event is substituted by another event of the same category. +A typical use case is item replacements: a customer receives a replacement product instead of the original purchase. + +Replacement relations allow the metric to use information from the new event rather than the original one. + +Goal Relations - Replacement + +```javascript +// Triggered when a visitor gets a replacement +context.track("replacement", { + order_number: '0000982532', + replacement_price: 800, +}); +``` + +Configuration includes: + +- **Current goal key property** +Identifier on the current event (e.g., **order_number**). +- **Foreign goal** +The event representing the replacement (e.g., **replacement**). +- **Foreign goal key property** +The matching identifier on the replacement event (e.g., **order_number**). +- **Foreign goal value property** +Value from the replacement event that should override or adjust the original. For example, **replacement_price**. +- **Duplicated foreign goal aggregation** +How the value of the foreign goal achievement will be aggregated +when there are multiple foreign goal achievements that match the **Foreign goal relation key property**. 
+ +There are five options: + +| Aggregation Type | Description | +|------------------|----------------------------------------------------------------------------------------------------------| +| Pick first | The first foreign goal achievement will be used to update the value of the current goal. | +| Pick last | The last foreign goal achievement will be used to update the value of the current goal. | +| Sum | The sum of all foreign goal achievements will be used to update the value of the current goal. | +| Minimum | The minimum value of all foreign goal achievements will be used to update the value of the current goal. | +| Maximum | The maximum value of all foreign goal achievements will be used to update the value of the current goal. | + +Replacement relations allow the metric to reflect up-to-date values when items are swapped or reissued. + +## Format, scale and precisions + +This section controls how your metric’s **Value** and **Mean** are displayed in the results table. +It does not change how the metric is calculated: it only affects the formatting applied when values are shown in the metric's tables. + +:::info +This part of a metric’s setup is version-controlled. +To edit those fields, a new version of the metric will need to be created. +::: + +Gross Conversion Rate metric table to illustrate how Value and Mean are shown + +You can customise three aspects for both **Value** and **Mean**: +- Format +- Scale +- Precision + +These settings allow you to present the metric in a readable and meaningful way depending on the type of data (counts, currency, percentages, ratios, etc.). + +### Value + +The Value column in the results table displays the total summed value of your metric for each variant. +You can control how this number appears using: + +#### Format +A string template that determines how the number is wrapped or displayed. 
+For example: +- `{}` → shows the raw number +- `{} €` → appends a currency symbol +- `$ {}` → prefixes a dollar sign +- `{} items` → useful for quantity metrics + +#### Scale + +A multiplier applied before formatting the output. +Common uses: +- `1` → show the value as-is +- `0.01` → convert cents to euros +- `100` → convert a ratio to a percentage during display + +Example: +If the raw total value is 1500.123456 and scale is 1, the displayed value might be 1,500. + +#### Precision + +How many decimal places to show. +Examples: +- `0` → no decimals (ideal for counts) +- `2` → two decimals (useful for money) +- `3` → three decimals (fractions, averages, conversion rates) + +### Mean + +The Mean column displays the average metric value per participant for each variant. +You can format it separately because mean values often need different formatting than totals (e.g., percentages vs absolute numbers). + +#### Format + +Same behaviour as for Value. +Examples: +- `{}` → raw +- `{}%` → percentage +- `${}` → currency + +#### Scale + +Scale is especially important for means: + +For conversion rate metrics, raw mean tends to be a ratio (0.0485). +With scale = 100, it becomes 4.85. + +For monetary means, if your underlying value is in cents, scaling by 0.01 converts to dollars or euros. + +#### Precision + +How many decimal places to show. +Examples: +- `0` → no decimals (ideal for counts) +- `2` → two decimals (useful for money) +- `3` → three decimals (fractions, averages, conversion rates) + +## Metric threshold alert + + +A metric threshold alert notifies you when the observed effect of a metric crosses a value you consider important. +Instead of waiting for full statistical significance, the alert triggers as soon as the relative effect becomes large enough (either positively or negatively) to matter. + +The alert works by watching the confidence interval (CI) of the impact. + +:::info +This part of a metric’s setup is version-controlled. 
+To edit those fields, a new version of the metric will need to be created. +::: + + +### Falls below threshold + +Use this when you want to be alerted if the experiment becomes worse than a certain relative value. + +For example: +- Threshold: -2% +- Alert triggers when: upper CI bound < -2% + +This means even the optimistic estimate is still worse than –2 percent, so the effect could be harmful. +In this case and to prevent further loss it might be worth stopping the experiment and investigating what is happening. + +Metric alert threshold + +This can help you detect dangerous regressions early. + +### Rises above threshold + +Use this when you want to be notified if the experiment becomes better than a certain value. + +For example: +- Threshold: +5% +- Alert triggers when: lower CI bound > +5% + +This indicates that even the conservative estimate of the effect exceeds your target improvement. \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/_category_.json b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/_category_.json new file mode 100644 index 00000000..4a5a2b52 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 4, + "collapsible": true, + "collapsed": true, + "label": "Metric Types" +} \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/goal-count.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/goal-count.mdx new file mode 100644 index 00000000..a43345b2 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/goal-count.mdx @@ -0,0 +1,34 @@ +--- +sidebar_position: 1 +--- + +# Goal Count + +## Overview + +Counts how many times a specific goal event is triggered, even if the same user fires it multiple times. 
+ +## Examples + +```javascript +context.track("purchase", { + price: 1000, + order_number: "0000982532", + product_id: "ABC123", + category_id: "ZYZ123" +}); +``` + +Given a `purchase` event shown above, a `Total purchases` metric could be created. +This metric would simply **count all the purchases** made by participants in the experiment. + +Using a metric's filter it is also possible to create `Total purchases Category ZYZ123` from such events. +In this case, the metric would count all the purchases of product from category ZYZ123. + +## Good to know + +What you need to know about `Goal Count` metrics. + +- Good for measuring total activity volume. +- Simple to understand and compute. +- Can be skewed by heavy users who trigger the event repeatedly. \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/goal-unique-count.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/goal-unique-count.mdx new file mode 100644 index 00000000..dab78587 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/goal-unique-count.mdx @@ -0,0 +1,31 @@ +--- +sidebar_position: 2 +--- + +# Goal Unique Count + +## Overview + +Counts how many unique users triggered a goal event at least once. + +## Examples + +```javascript +context.track("purchase", { + price: 1000, + order_number: "0000982532", + product_id: "ABC123", + category_id: "ZYZ123" +}); +``` + +From this event, a `Unique purchasers` metric could be created. +This metric counts how many distinct users triggered the purchase event, regardless of how many times they purchased. + +A filtered version such as `Unique purchasers (Category ZYZ123)` would count only users who purchased from category ZYZ123. + +## Good to know + +- Great for conversion-style metrics. +- More stable than total counts since each user counts once. +- Does not capture purchase volume per user. 
\ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/property.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/property.mdx new file mode 100644 index 00000000..2830e23d --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/property.mdx @@ -0,0 +1,27 @@ +--- +sidebar_position: 4 +--- + +# Property + +## Overview + +Aggregates a numerical value included in the event payload, such as price, quantity, or duration. + +## Examples + +```javascript +context.track("purchase", { + price: 1000, + order_number: "0000982532", + product_id: "ABC123", + category_id: "ZYZ123" +}); +``` + +A common example is `Revenue`, which would sum the `price` for all `purchase` events. + +## Good to know + +- Essential for money- or quantity-based metrics. +- Sensitive to outliers; clipping or CUPED may help. \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/ratio.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/ratio.mdx new file mode 100644 index 00000000..df061d40 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/ratio.mdx @@ -0,0 +1,64 @@ +--- +sidebar_position: 7 +--- + +# Ratio + +## Overview + +A Ratio metric compares two related metrics by dividing one value (the numerator) by another (the denominator). +This creates a normalized measure that reveals the relationship between the two quantities rather than their absolute values. + +Ratio metrics are useful when you want to understand efficiency, conversion, quality, or per-user averages, because they remove scale effects and highlight how one metric behaves relative to another. 
+ +## Examples + +Suppose you track the following events: + +```javascript +context.track("click", { + element: "add_to_cart", + product_id: "ABC123" +}); + +context.track("page_view", { + page: "product_detail", + product_id: "ABC123" +}); +``` + +You could build a `Click-through rate ratio` metric defined as: +- **Numerator**: `Clicks per visitor` +- **Denominator**: `Page views per visitor` + +This produces a meaningful relative measure that answers: +“Out of all page views, how often do users click the add-to-cart button?” + +More examples: +- `Average revenue per order` +-- Numerator: Revenue +-- Denominator: Orders + +- `Purchases per active session` +-- Numerator: Purchase count +-- Denominator: Session count + +- `Errors per request` (for performance / reliability metrics) +-- Numerator: Error events +-- Denominator: API requests + +- `Unique products purchased per purchaser` +-- Numerator: Goal Property Unique Count (product_id) +-- Denominator: Unique purchasers + +In all cases, the ratio reveals relative performance rather than total scale. + +## Good to know + +- Ratio metrics are derived metrics: they depend on two other existing metrics. +- They allow you to normalize behaviors across traffic volumes, user counts, or content types. +- Fluctuations in either numerator or denominator can change the ratio — interpreting ratios requires understanding both sides. +- They are highly effective for comparing efficiency (e.g., clicks per page view), value (e.g., revenue per user), or quality (e.g., errors per request). +- Outlier settings, goal filters, and definitions of numerator and denominator are inherited from their underlying metrics. +- A ratio metric does not directly count events — instead, it evaluates the relationship between two already-defined metrics. +- Changes to numerator or denominator definitions typically require versioning, since a ratio metric’s meaning depends entirely on those definitions. 
\ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/retention.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/retention.mdx new file mode 100644 index 00000000..7347ae2e --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/retention.mdx @@ -0,0 +1,62 @@ +--- +sidebar_position: 6 +--- + +# Retention + +## Overview + +A `Goal Retention` metric measures how many users return to complete a goal again after a defined period of time. +It tells you whether users continue performing the same action after a delay, rather than only measuring their initial behavior. + +You can track retention either: + +- from the moment a user is first exposed to the experiment, or +- from the moment the user first completes the goal itself + +This makes it possible to measure longer-term engagement and whether your experiment influences users’ likelihood to come back and repeat an action. + +## Examples + +```javascript +context.track("purchase", { + price: 1000, + order_number: "0000982532", + product_id: "ABC123", + category_id: "ZYZ123" +}); +``` + +Imagine you want to measure how many users who first purchased **make another purchase within 7 days**. + +You can create a `Purchase Retention (7 days)` metric by: +- Selecting the `purchase` goal +- Setting the `Retention Period` to 7 days +- Choosing whether the retention window starts from: +-- the user’s first exposure, or +-- the user’s first purchase + +Then the metric counts users who satisfy: +- they completed the initial purchase +- and they completed another purchase after 7 days, within the configured retention window + +**More examples** + +- `Checkout Recovery (24 hours)`: +Users who returned to complete a purchase within 24 hours after first adding to cart. + +- `Content Return (3 days)`: +Users who read an article again within 3 days after their first reading. 
+ +- `Subscription Renewal (30 days)`: +Users who returned to perform a renewal action after a 30-day cycle + +## Good to know + +- Great for measuring long-term impact rather than immediate conversions. +- Helps identify behaviors such as repeat purchases, content revisits, subscription renewals, or delayed engagement. +- The metric counts users, not events — a user either meets retention criteria or does not. +- Filters on the goal event apply before evaluating retention (for example: “retention among users who purchased category ZYZ123”). +- Retention metrics are often more stable and less noisy than total repeated activations, since each user contributes at most once. +- Changing the retention period alters the meaning of the metric and will requires a new version. +- Useful for experiments where the impact is delayed: onboarding experiences, notifications, emails, recommendations, reminders, etc. \ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/time-to-achievement.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/time-to-achievement.mdx new file mode 100644 index 00000000..c2e4bdc2 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/time-to-achievement.mdx @@ -0,0 +1,28 @@ +--- +sidebar_position: 3 +--- + +# Time to Achievement + +## Overview + +Measures how long it takes a user to reach a specific goal after entering an experiment. + +## Examples + +```javascript +context.track("purchase", { + price: 1000, + order_number: "0000982532", + product_id: "ABC123", + category_id: "ZYZ123" +}); +``` + +A possible `Time to Achievement` metric created from this event could be `Time to first purchase`. + +## Good to know + +- Great for understanding friction and delays in the user journey. +- Can be skewed by users who never reach the goal. +- Can have a very long tail. 
\ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/metric-types/unique-property-count.mdx b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/unique-property-count.mdx new file mode 100644 index 00000000..2e0cd6de --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/metric-types/unique-property-count.mdx @@ -0,0 +1,35 @@ +--- +sidebar_position: 5 +--- + +# Unique Property Count + +## Overview + +Counts how many unique values of a specific property appear in the goal events triggered by users. +Instead of counting users, this metric measures the variety or diversity within a property, such as the number of different products purchased, +the number of distinct categories reached, etc. + +This metric helps you understand breadth rather than volume or frequency. + +## Examples + +```javascript +context.track("purchase", { + price: 1000, + order_number: "0000982532", + product_id: "ABC123", + category_id: "ZYZ123" +}); +``` + +From this event, a `Unique products purchased` metric could be created by selecting the **product_id** property. + +## Good to know + +- Great for understanding variety, breadth, and coverage within your goals. +- Useful for marketplaces, catalog browsing, search, merchandising, and content variety analyses. +- Each property value is counted once, regardless of how many times it appears. +- Filtering the goal event first (e.g., to a category or price range) affects which values contribute to the unique count. +- Outlier settings do not apply here since the metric counts distinct strings or identifiers, not numeric values. +- Can complement total count or user-based metrics by revealing diversity even when volume is stable. 
\ No newline at end of file diff --git a/docs/web-console-docs/goals-and-metrics/metrics/overview.mdx b/docs/web-console-docs/goals-and-metrics/metrics/overview.mdx new file mode 100644 index 00000000..c324ae48 --- /dev/null +++ b/docs/web-console-docs/goals-and-metrics/metrics/overview.mdx @@ -0,0 +1,268 @@ +--- +sidebar_position: 1 +--- + +# Overview + +## Experimentation Metrics + +Metrics are aggregations or computations derived from [goals](../goals/overview). +They transform raw event data into interpretable measures that quantify the effect of an experiment. + +Metrics summarize performance over a group of experiment visitors — for example, `conversion rate`, `Average revenue per visitor`, or `click-through rate`. + +Metrics can represent direct business outcomes, engagement signals, or technical performance indicators, +and are often grouped into [categories](categories) such as `conversion`, `engagement`, `retention`, or `revenue`. + +## Understanding Experimentation Metrics + +Experimentation metrics can be described using many attributes, often combining those attributes together. +In this page we try to explain the most important attributes and what they mean in the context of experimentation. + +### Role +In ABsmartly and many other experimentation platforms, metrics are often described as **primary**, **secondary**, **guardrail**, **exploratory**, those +attributes describe the role that the metric plays in the experiment. + +#### Primary metric +In an experiment the **Primary** metric is the single most important measure used to determine whether the tested change achieves its desired outcome and whether or not the hypothesis is validated or rejected. +It reflects the experiment's primary objective and directly aligns with the business’s strategic goals. 
+The primary metric is the metric used to inform the experiment design regarding defining the minimum detectable effect (MDE) and the sample size (to ensure sufficient power to detect a meaningful change). + +Examples: +- `revenue_per_visitor` +- `conversion_rate` +- `retention_rate` + +#### Secondary metrics +**Secondary** metrics, while not the main decision-making criteria, play a big role in ensuring a comprehensive understanding of the experiment’s impact. +They provide additional context and insights beyond the primary metric and can help detect unintended side effects. + +Examples: +- `items_added_to_cart` +- `product_page_view` +- `banner_interaction` + +#### Guardrail metrics +**Guardrail** metrics are safeguards used to monitor and ensure the health, stability, and overall integrity of the system during an experiment. +They do not measure the success of the primary business objectives but are critical for detecting unintended negative impacts on the business, user experience and/or operational performance. +Guardrail metrics act as early warning systems, identifying potential risks such as degraded performance, increased errors, or adverse user behavior before they escalate into larger problems. + +Examples: +- `errors` +- `app_crashes` +- `page_load_time` +- `support_tickets` + +#### Exploratory metrics +In ABsmartly, **exploratory** metrics refer to metrics of interest not used in decision-making. +Exploratory metrics are often used in post-analysis and are a great source of insights on top of which new hypotheses can be built. Exploratory metrics should not be used to evaluate the experiment. + +### Purpose +A metric can be described as a **business** metric, a **behavioural** metric or an **operational** metric. +Those attributes describe the purpose of the metric, what it is measuring. + +#### Business +In experimentation, **business** metrics refer to metrics measuring the impact of a change on a business KPI. 
Business metrics are often used as primary and/or guardrail metrics. + +Examples: +- `revenue_per_visitor` +- `conversion_rate` +- `retention_rate` +- `calls_to_customer_support` + +#### Behavioural +**Behavioural** metrics are metrics measuring the impact of a change on the visitor's behaviour. Behavioural metrics are usually measuring the direct impact of a change and as such have high sensitivity. Behavioural metrics are often used as secondary metrics. + +Examples: +- `items_added_to_wishlist` +- `clicks_on_banner` +- `product_page_views` + +#### Operational +**Operational** metrics, also known as technical metrics, measure the impact of a change on system performance. Operational metrics can be used as guardrail metrics but also possibly as primary or secondary metrics depending on the goal of the experiment. + +Examples: +- `page_load_time` +- `app_crashes` +- `error_rate` + +### Data structure +All metrics are either **binomial** or **continuous**; this is a reference to how the underlying data is structured and measured. + +#### Binomial +**Binomial** metrics represent a binary outcome for each visitor in the experiment, where each instance falls into one of two categories (e.g., success/failure, yes/no, 0/1). +They are typically represented as a percentage (ie: 10% conversion rate), binomial metrics follow a normal distribution. Binomial metrics are easier to interpret and communicate. + +Examples: +- `conversion_rate` +- `click_through_rate` (ie: the percentage of users clicking on a link) +- `churn_rate` +- `email_open_rate` + +#### Continuous +**Continuous** metrics on the other hand can take on a wide range of values (either measured or counted). Continuous metrics often represent quantities or durations. Their underlying distribution varies depending on the data. Continuous metrics are more sensitive (they capture a wider range of data) and offer more insights but they can be heavily influenced by outliers and are harder to interpret.
+ +Examples: +- `time_on_page` +- `time_to_first_booking` +- `number_of_items_in_cart` +- `revenue_per_visitor` + +### Time horizon +Another aspect of experimentation metrics is their time horizon; typically metrics can be referred to as **short-term** or **long-term**. + +#### Short-term +**Short-term** metrics refer to metrics that measure immediate or near-term outcomes, typically during or shortly after the experiment. +They can typically be measured accurately in the experiment’s runtime and provide quick feedback on the effects of changes. + +Examples: +- `real_time_conversion_rate` (during the test) +- `time_spent_on_page` +- `click_through_rate` + +#### Long-term +On the other hand, **long-term** metrics measure delayed outcomes which make it hard to measure during the runtime of an experiment. +Typically long-term metrics represent the strategic goals and align with the desired business outcomes. Using such a metric for decision making requires adapting the experiment design so it captures this long term impact. + +Examples: +- `true_conversion_rate` (after cancellation and returns have been processed) +- `customer_lifetime_value` +- `long_term_revenue` +- `retention_rate` (over 6 months or more) + +### Functionality +Finally, metrics can also be described by how they operate in the context of the experiment. + +#### Proxy +**Proxy** metrics are indirect measures used to evaluate an outcome that cannot be measured directly (see the example of long-term metrics above). In experimentation proxy metrics can be used as a replacement for the actual desired goal. There should be a strong correlation between the proxy and the actual goal and this should be validated frequently. + +Examples: +- `time_on_site` as a proxy for engagement +- `click_on_buy_button` as a proxy for conversion + +#### Composite +**Composite** metrics combine multiple individual metrics into one measure to capture a nuanced view of success.
They are often used strategically but can dilute sensitivity. +Examples: +- `Overall Evaluation Criterion` (OEC) as a weighted combination of metrics like engagement, revenue, and satisfaction + + +## Metric Versioning + +Metrics are **version-controlled** to ensure that your experiment results remain stable, interpretable, and historically accurate. +When a metric definition changes, its meaning changes and that can impact how past and ongoing experiments would be understood. +To prevent this, changes to certain fields require a new version of the metric to be created. + +Versioning ensures that: + +- Historical results remain trustworthy: +Experiments that used an older version of the metric will always continue to use that exact definition, so their numbers do not change retroactively. +- Metric definitions are transparent and reproducible: +You can always refer back to earlier versions and understand exactly how a metric was constructed at any point in time. +- Teams can evolve metrics safely: +You can improve outlier handling, adjust filters, or refine properties without affecting other teams or ongoing experiments. +- Experiments remain comparable over time: +Versioning prevents silent drift in metric definitions that would otherwise make comparisons unreliable. + +Versioning gives you confidence that when you modify a metric, you are not rewriting the past, and your experiment results remain consistent and dependable. + +:::note +While past and current experiments can make use of an older version of a metric, only the currently **active** version of a metric can be added to new experiments. +::: + +Each metric contains configuration fields that play different roles in versioning. +To balance flexibility with historical accuracy, fields fall into three categories: + +### Editable and shared across all versions + +These fields belong to the metric itself, not to a specific version. +If you edit them, the change applies to every version of the metric.
+ +- `Name`. To ensure consistency and discoverability, the name of a metric needs to be the same across all versions of a metric. +- `Owner`. To better enforce ownership and governance, a metric's owners must own the entire history of a metric including all its past versions. + +### Editable and version-specific fields + +These fields define the behaviour of a certain version of a metric. +Editing them only modifies the current version, and does not impact older versions. + +- All fields in the `Metrics Detail` section. +- All fields in the `Metadata` section. + +These fields allow you to enrich the metric's version without altering the meaning of historical results. + +### Non-editable, version-specific fields + +These fields define the core logic of the metric: how values are extracted, filtered, capped, or related to other goals. +Those fields are **immutable** and tied to a certain version of the metric. + +A new version of the metric must be created to be able to change those fields. + +Locked fields include: +- All fields in the `Goal` section. +- All fields in the `Format, Scale & Precision` section. +- All fields in the `Metric threshold alert` section. + +Locking these fields ensures that metrics remain stable and reproducible over time, and that historical experiment results never change unexpectedly. + +## Ownership & permissions + +`Metrics` are Managed-Assets and, as such, follow a specific [ownership model](/docs/web-console-docs/users-teams-permissions/ownership-and-permissions). + +### Ownership + +A metric can be owned by 1 or more teams and, if the feature was enabled for your organisation, individual users. + +:::info +Team ownership is generally a better fit for governance because it creates stability, resilience, and accountability at the right level. + +A team persists even when individuals change roles, leave, or shift priorities, so the metric keeps a reliable steward over time.
+Expertise is usually distributed across a group rather than held by one person, which reduces risks from single-point knowledge and avoids bottlenecks. +Team ownership is better suited to review changes, ensure consistency, and maintain quality. +::: + +### Permissions + +The following permissions exist when managing and working with `metrics`. + +| Permission | Description | +|-------------------------|----------------------------| +| **Admin metrics** | Grants full administrative control over metrics, including managing permissions, visibility, and configuration settings for all metrics within the workspace or team. | +| **Archive a metric** | Allows archiving a metric that is no longer in use; archiving a metric archives **all the versions** of that metric. | +| **Create a metric** | Enables the creation of new metrics. | +| **Edit a metric** | Allows modification of existing metric definitions and the creation of new versions of that metric. | +| **Get a metric** | Permits viewing the details of a specific metric, including its configuration and usage across experiments. | +| **List metrics** | Grants access to view the list of all available metrics within the workspace or team. | +| **Unarchive a metric** | Allows restoring a previously archived metric. | + + +#### Global access + +Permission to create and manage `metrics` can be granted to the relevant users through their [role](../../configuration/settings#roles) at the platform level. + +:::info +It is not recommended to provide access to `metrics` to non-platform-admin users at the platform level. +::: + +#### Built-in team-level roles + +Permission to create and manage `metrics` can be provided to the relevant users at the team level by granting them the correct role in that team. + +| Permission | Description | +|-------------------------|--------------| +| **Team Admin** | Grants full control over metrics owned by that team.
| +| **Team Contributor** | Grants the ability to create and manage metrics in the team scope. | +| **Team Viewer** | Grants the ability to view and list metrics owned by the team. | + +:::info +Team roles are inherited, so if a user is a `Team Contributor` in a team, then this user would also be a `Team Contributor` in all child teams. +::: + +### Sharing metrics + +While `metrics` are owned by teams, they can be shared with other teams and individuals across the organisation. + +| Permission | Description | +|-------------|--------------| +| **can_view** | Grants this user or team the ability to view and make use of this metric in their experiments. | +| **can_edit** | Grants this user or team the ability to edit this metric. | + diff --git a/docs/LaunchPad Browser Extension/_category_.json b/docs/web-console-docs/launchpad-browser-extension/_category_.json similarity index 83% rename from docs/LaunchPad Browser Extension/_category_.json rename to docs/web-console-docs/launchpad-browser-extension/_category_.json index 17a9a064..ed532a50 100644 --- a/docs/LaunchPad Browser Extension/_category_.json +++ b/docs/web-console-docs/launchpad-browser-extension/_category_.json @@ -1,5 +1,5 @@ { - "position": 5, + "position": 7, "collapsed": true, "collapsible": true, "label": "LaunchPad Browser Extension" } diff --git a/docs/LaunchPad Browser Extension/creating-an-experiment-with-the-launchpad.mdx b/docs/web-console-docs/launchpad-browser-extension/creating-an-experiment-with-the-launchpad.mdx similarity index 99% rename from docs/LaunchPad Browser Extension/creating-an-experiment-with-the-launchpad.mdx rename to docs/web-console-docs/launchpad-browser-extension/creating-an-experiment-with-the-launchpad.mdx index c21ef726..f6c15f5f 100644 --- a/docs/LaunchPad Browser Extension/creating-an-experiment-with-the-launchpad.mdx +++ b/docs/web-console-docs/launchpad-browser-extension/creating-an-experiment-with-the-launchpad.mdx @@ -2,7 +2,7 @@ sidebar_position: 2 --- -import Image
from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # Creating an experiment using the ABsmartly LaunchPad diff --git a/docs/LaunchPad Browser Extension/getting-started-with-the-launchpad.mdx b/docs/web-console-docs/launchpad-browser-extension/getting-started.mdx similarity index 98% rename from docs/LaunchPad Browser Extension/getting-started-with-the-launchpad.mdx rename to docs/web-console-docs/launchpad-browser-extension/getting-started.mdx index b7fa22c5..6fb69baa 100644 --- a/docs/LaunchPad Browser Extension/getting-started-with-the-launchpad.mdx +++ b/docs/web-console-docs/launchpad-browser-extension/getting-started.mdx @@ -2,7 +2,7 @@ sidebar_position: 1 --- -import Image from "../../src/components/Image"; +import Image from "../../../src/components/Image"; # Getting started with the ABSmartly LaunchPad diff --git a/docs/web-console-docs/overview.mdx b/docs/web-console-docs/overview.mdx new file mode 100644 index 00000000..1889e747 --- /dev/null +++ b/docs/web-console-docs/overview.mdx @@ -0,0 +1,21 @@ +--- +sidebar_position: 0 +--- + +# Overview + +The Product Documentation section explains how to use the ABsmartly Web Console. +If you run experiments, manage feature flags or work with goals and metrics, this is where you will find everything you need. + +These guides walk you through the full workflow of designing, running and analysing experiments, as well as managing your workspace and configuration. You will also find explanations of key concepts, best practices and governance features that help teams run safe and consistent experimentation at scale. 
+ +Use this section to learn how to: +- create and manage experiments +- define goals, metrics and guardrails +- set up feature flags and rollouts +- track and validate events +- manage users, teams and permissions +- configure workspaces and settings +- use tools like LaunchPad for visual experimentation + +Whether you are new to ABsmartly or already running experiments, this section provides a complete guide to using the product effectively. \ No newline at end of file diff --git a/docs/web-console-docs/tutorial.mdx b/docs/web-console-docs/tutorial.mdx deleted file mode 100644 index ca1952b4..00000000 --- a/docs/web-console-docs/tutorial.mdx +++ /dev/null @@ -1,344 +0,0 @@ ---- -sidebar_position: 0 ---- - -import Image from "../../src/components/Image"; - -# Getting Started - -Welcome to your Web Console dashboard! This is where you will create, view and monitor -your experiments. - -A screenshot of a web console dashboard that has no experiments in it. - ---- - -You're almost ready to start [creating your first experiment](/docs/web-console-docs/creating-an-experiment)! -But first we have to tell the dashboard **how**, **where** and **why** you want to run -your tests. - -## Creating A Unit - -Units are the unique identifiers that are going to be used to generate a variant. -For experiments running across different platforms (iOS, Android, web, email, etc.) the -unit should be known across all of them. Most likely the authenticated -user's `user_id`. - -Have a look at the [Tracking Unit](/docs/web-console-docs/creating-an-experiment#tracking-unit) -section of the **Creating an Experiment** docs for more information. - -To define your first Unit go to `Settings > Units` and click on `Create Unit`. -All that needs set here is the name of your unit. We recommend that you give -it the same name as it's referenced in your code. - -:::info Tip -Feel free to add a description, so the rest of your team knows **what** the unit -will reference to and **why** it exists. 
-::: - -## Setting Your Applications - -When creating an experiment, you will be asked which platforms the experiment -will run on. - -To set these up, navigate to `Settings > Applications` and click `Create Application`. - -Similarly to Units, you can name these whatever you want and add a description -to define it more for your team. Some examples of application names might be -`ios`, `android` or `www`. - -Once your applications are defined, you're ready to move on to... - -## Setting Your Goals - -Goals are the names given to processes that you will later track in your code. - -To setup your first goal, head to `Settings > Goals` and click on `Create Goal`. - -The goal name will be used in your code, so we recommend using a keyword-like -or kebab-case name. For example, `newsletter_subscription`, `cpu_load_time` or -`bookings`. - -:::info Tip -[Tags](/docs/web-console-docs/settings#tags) are useful for searching, filtering and classifying your -goals. We recommend prefixing each tag with the tag's type. For example, -`location:Header`, `stack:Backend` or `psychological:Trust`. -::: - -## Creating A Metric - -Lastly, you will need to create some Metrics! Metrics are the parameters that -will be used to track your goals. They can give you insights about your -business, your users' behavior, the performance of your system and more! - -To create a metric, head to `Settings > Metrics` and click on `Create Metric`. - -Each metric needs a name. Some examples of metrics trackable metrics -might be `Bookers`, `Bookings` or `Time to Book`. You can also give your -metric a description for clarity across your team. - -### Metric Impact - -Next, you need to choose your metric's impact. For this, you have three choices - -positive, negative or unknown. 
- -| Positive | Negative | Unknown | -| :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| A metric with a positive impact means that if it is increased, it's good for your business. For example, `Bookings`, `Revenue` and `Subscribers` would all be positive metrics. | Metrics with a negative impact, on the other hand, are bad for business if they increase in frequency. For example, `Unsubscriptions` or `Account Deletions`. | Metrics with an unknown impact are used when the effect of the metric could be either positive or negative. For example, `Time on Page` could be positive or negative depending on whether the user is engaged, versus whether they are lost. | - -### Metric Type - -Next up is the metric's type. There are 4 types to choose from: - -#### Goal Unique Count - -`Goal Unique Count` is used when you want to automatically track the amount -of unique users [(units)](#units) who performed a goal. `Bookers` is a good -example of this as you want to track the amount of people who booked, not the -total amount of bookings. For that you would choose... - -#### Goal Count - -`Goal Count` is used for when you want to track every instance of a goal being -achieved. `Bookings` is a good example for this one, as a single user may -have made multiple bookings. - -#### Goal Time to Achievement - -`Goal Time to Achievement` is the amount of time between the user being -exposed to your experiment and achieving a goal. 
This is tracked -automatically by all of our SDKs. - -#### Goal Property - -`Goal Property` is used when you want to use your own properties for tracking -a metric. For example, you may wish to track the `Revenue` made during your -experiment. This will be done by passing the revenue as a property when tracking -your goal. For example, in the Javascript SDK it would look like this: - -```js -context.track("goal_name", { revenue: 99.99 }); -``` - -Selecting `Goal Property` in the Metric Type field adds some extra fields to -the form. You can learn more about these in the -[goal property fields section](#goal-property-fields). - -#### Goal Retention - -`Goal Retention` metrics are used to track data when a user has achieved a -goal after a set period of time from when they were first exposed to -the experiment or from their first achievement of the same goal. - -For example, you could track whether users who left items in their checkout -cart came back to make a purchase after one week (maybe after receiving an -automatic reminder email). This could be done by creating a Retention Metric -that tracks a `checkout` goal and setting the `Retention Period` to 7 days. - -#### Goal Ratio - -A `Goal Ratio` metric tracks two goals and divides one by the other. When -selecting Goal Ratio from the Metric type dropdown, you will be presented with -two goal selection sections - a numerator and a denominator. - -This metric type is useful for tracking data like click-through-rate. For example, -you could choose `clicked_signup_button` as the numerator goal, and -`visited_landing_page` as the denominator goal. This metric will now track the -CTR of the signup button on the landing page. - -Internally, the variance of these metrics is automatically adjusted using the -delta method. - -### Metric Goal - -The `Metric Goal` is the goal which your metric relates to. 
This field will -provide a dropdown of any goals that you set up in the [Setting Your Goals section](#3-setting-your-goals). - -### Goal Property Fields - -If your metric is of `Goal Property` type, you will have to fill in some -extra information on where the property value will come from and how you want -it to be formatted on the dashboard. - -Upon selection, you will be presented with 4 more fields. - -#### Source Property - -This field defines where the property value comes from in your track call. -For example, if you were to call the track method in your code with the -following parameters: - -```js -context.track("revenue", { - payment: { - provider: "paypal", - revenue: 99.99, - currency: "USD", - }, -}); -``` - -And you want to use the revenue made as your metric's source property, you -would pass `payment/revenue` into the Source Property field. - -#### Format String - -The format string defines how you would like your values to be presented in -the Web Console. Both the values and the mean of your data can be customised -this way. The curly braces `{}` represent the value and you can put -whatever you like around it. For example, if you wanted to track your -incoming revenue in dollars, you might want your Format String to be `${}`. -This would cause a value of `1000` to be presented on the dashboard as `$1000`. - -Below the Formatting fields, you can see a preview of how your value will be -formatted. - -#### Scale of Value - -The scale of value is a multiplier for your value. This is useful if, -for example, your revenue is tracked in cents. You could set the metric's -scale of value to `0.01` and a revenue value of `100000` cents would show itself -as `$1000`. - -#### Precision - -The precision option allows you to set a number of decimal places that your -value can have. Set this to `2` and a revenue value of `1000.455` will be -presented as `$1000.46`. 
- -### Outliers - -Outlier limits force extreme metric values to be within a specified range to -reduce variance. There are four options to choose from: - - - - - - - - - - - - - - - - - - - - - - - - - - -
MethodDescription
Unlimited - No outlier treatment is performed and outliers are included in the data - as normal. -
Quantile - Limits outliers to quantiles, where you can choose your own lower and - upper quantile limits. -
Standard Deviation - Cap each value to a multiple of the standard deviation of all seen - values. -
Fixed - Allows you to limit outlier values to specific upper and lower values. -
- -### Property Filters - -Property Filters allow you to filter your metrics to make them more specific. -Let's take our example `track` call from earlier: - -```js -context.track("revenue", { - payment: { - provider: "paypal", - revenue: 99.99, - currency: "USD", - }, -}); -``` - -If you are tracking your revenue, you may want to limit the metric to amounts -that were paid in dollars. This can be done by adding a property filter where -`payment/currency` - `Is` - `USD`. Now, only amounts paid in dollars will be -tracked by this metric. - -:::info NOTE -In this use-case, you might want to rename the metric to `Revenue in Dollars`. -::: - -### Goal Relations - -Goal Relations allow you to update the value of a goal when another foreign goal, related to the metric's goal, is achieved. -The two goals are related through a common unique key, present in both goals' properties, for example, `transaction_id` or `order_id`. - -This is useful, for example, when a user requests a refund. You may want to update the `Revenue` metric -to subtract the amount of the refund from the total revenue. - -To add a Goal Relation, click the `Add relation` button. - -#### Relation Type - -The `Relation Type` defines what happens to the value of the goal when the other goal is achieved. There are three options: - -| Relation Type | Description | Metric Type | -|---------------|-----------------------------------------------------------------------------------------------------------------|-----------------------| -| Cancellation | The initial value of the goal achievement is nullified. | All metric types | -| Refund | The new value from the foreign goal is added to or subtracted from the initial value from the goal achievement. | Goal Property metrics | -| Replacement | The initial value of the goal achievement is replaced with the foreign goal achievement's value. 
| Goal Property metrics | - -#### Current goal relation key property - -The `Current goal relation key property` defines the property that will be used to identify the goal achievement that -will be updated. In our refund example, this could be `order_id`. - -#### Refund operation (only for refund relations) - -The `Refund operation` defines whether the value of the foreign goal achievement will be added to or subtracted from -the initial value of the goal achievement. For example, if you have a `Revenue` metric that tracks the `amount` property of -a `Purchase` goal, and you have a `Refund` goal that represented its value as a negative number, you would want to -add the value of the `Refund` goal achievement to the `Revenue` goal achievement. - -#### Foreign goal - -The `Foreign goal` defines the goal that will be used to update the value of the current goal. In our refund example, -this would be the `Refund` goal. - -#### Foreign goal relation key property - -The `Foreign goal relation key property` defines the property that will be used to identify the goal achievement that -will be used to update the value of the current goal. In our example, this would also be `order_id`. - -#### Foreign goal relation value property (only for replacement and refund relations) - -The `Foreign goal relation value property` defines the property that will be used to update the value of the current goal. -For example, `refund_amount`. - -#### Duplicated foreign goal aggregation (only for replacement and refund relations) - -The `Duplicated foreign goal aggregation` defines how the value of the foreign goal achievement will be aggregated -when there are multiple foreign goal achievements that match the `Foreign goal relation key property`. 
- -There are five options: - -| Aggregation Type | Description | -|------------------|----------------------------------------------------------------------------------------------------------| -| Pick first | The first foreign goal achievement will be used to update the value of the current goal. | -| Pick last | The last foreign goal achievement will be used to update the value of the current goal. | -| Sum | The sum of all foreign goal achievements will be used to update the value of the current goal. | -| Minimum | The minimum value of all foreign goal achievements will be used to update the value of the current goal. | -| Maximum | The maximum value of all foreign goal achievements will be used to update the value of the current goal. | - -## The Next Step - -Now you have your units, applications, goals and metrics set up, you're ready to move -on to the next chapter - [creating an experiment](/docs/web-console-docs/creating-an-experiment)! diff --git a/docs/web-console-docs/types-of-analysis.mdx b/docs/web-console-docs/types-of-analysis.mdx deleted file mode 100644 index 61c185fd..00000000 --- a/docs/web-console-docs/types-of-analysis.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -sidebar_position: 9 ---- - -import Image from "../../src/components/Image"; - -# Analysis Types (Fixed Horizon & Group Sequential) - -ABsmartly supports two types of statistical analysis: Fixed Horizon and Group Sequential. Understanding the differences between these methods and knowing when to use each can significantly impact the efficiency and accuracy of your experimentation program. - -## Fixed Horizon Testing - -Fixed Horizon Testing involves analyzing the results of an experiment after reaching a predefined sample size (number of unique visitors) or reaching a specific duration. This method, supported by most AB Testing tools, assumes that the sample size is defined before the experiment starts and remains unchanged throughout the runtime of the experiment. 
- -While this method is widely used and beneficial, it lacks flexibility, as decisions can only be made at a single predefined moment. This limitation can lead to unreliable decisions (when experimenters make decisions too early) as well as wasted time and resources. This makes the use of Fixed Horizon testing for product experimentation, where trust, speed, and agility are crucial, less beneficial and more challenging. This is especially true for teams with less experience. - -Fixed horizon uses a 2-sided test, meaning it evaluates whether the observed effect is significantly in either direction (positive or negative). Results in a 2 sided-test can be significantly positive, significantly negative or insignificant. - - -## Group Sequential Testing - -[Group Sequential Testing](https://absmartly.com/gst) is an adaptive analysis method that allows for interim analyses at various points during the experiment. At ABsmartly you can decide how often or how many interim analyses you want. Unlike Fixed Horizon Testing, this approach provides the flexibility to stop the experiments early for efficacy or for futility. - -Setting up a Group Sequential Test - - -While adding more interim analysis will slightly reduce statistical power compared to fixed-horizon testing, overall it greatly speeds up decision-making, as significance is commonly reached before the full sample is collected. This efficiency gained from using Group Sequential Testing is making a real difference to ABsmartly customers, to the pace at which decisions can be made. - -A Group Sequential Test result - -Unlike Fixed Horizon, Group Sequential Testing uses a 1-sided test, meaning it evaluates whether the observed effect is significant only in the expected direction. Results in a 1-sided test can either be significant in the expected direction or insignificant. - -:::info -Different experimentation platforms might use different sequential testing implementation. 
The most commonly used sequential method is Fully Sequential and while it offers the most flexibility (decisions can be made at any moment in time), it comes at the cost of much lower power which in turn leads to higher time to decision. At ABsmartly we believe Group Sequential Testing provides the right compromise between flexibility and speed which is required to make high-quality product decisions in a fast-moving business context. -::: - -## Which Analysis Type Should I Choose for My Experiment? - -Most of the time, Group Sequential Testing should be the preferred method (who does not want faster trustworthy results?) but there are a few use cases where you might decide to use a Fixed Horizon setup. This is mainly when you are dealing with a strong novelty effect (Group Sequential Testing might come to a premature conclusion which might not reflect the true impact) or where you have a long action cycle and wish to observe the visitors for a pre-defined period of time. - -Because it is a 2-sided test, Fixed Horizon is a better choice if differentiating between inconclusive and significantly negative results is important. 
diff --git a/docusaurus.config.js b/docusaurus.config.js index d364600d..adf24e10 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -8,7 +8,7 @@ const darkCodeTheme = require("prism-react-renderer/themes/dracula"); const config = { title: "The ABsmartly Docs", tagline: - "Booking.com and Netflix's experimentation culture without the big investment.", + "Booking.com experimentation culture without the big investment.", url: "https://docs.absmartly.com", baseUrl: "/", onBrokenLinks: "throw", @@ -42,6 +42,9 @@ const config = { locales: ["en"], }, + // Client modules for anchor redirects + clientModules: [require.resolve("./src/anchorRedirects.js")], + presets: [ [ "classic", @@ -85,7 +88,7 @@ const config = { config: { collector: { specPath: "api-spec.yaml", // Path to designated spec file - outputDir: "docs/SDK-API", // Output directory for generated .mdx docs + outputDir: "docs/APIs-and-SDKs/SDK-API", // Output directory for generated .mdx docs sidebarOptions: { sidebarCollapsible: false, sidebarCollapsed: false, @@ -93,7 +96,7 @@ const config = { }, nodeapi: { specPath: "nodeapi-spec.yaml", - outputDir: "docs/Web-Console-API", // Output directory for generated .mdx docs + outputDir: "docs/APIs-and-SDKs/Web-Console-API", // Output directory for generated .mdx docs sidebarOptions: { sidebarCollapsible: false, sidebarCollapsed: false, @@ -126,7 +129,7 @@ const config = { navbar: { title: "ABsmartly Docs", logo: { - alt: "The A B Smartly logo", + alt: "The ABsmartly logo", src: "img/logo.svg", style: { width: "3rem", @@ -134,24 +137,14 @@ const config = { }, items: [ { - to: "docs/web-console-docs/tutorial", - position: "left", - label: "Web Console Tutorial", - }, - { - to: "docs/SDK-documentation", + to: "docs/web-console-docs/overview", position: "left", - label: "SDK Docs", + label: "Product Documentation", }, { - to: "docs/SDK-API/absmartly-collector-api", + to: "docs/APIs-and-SDKs/overview", position: "left", - label: "SDK API", - }, - { - to: 
"docs/Web-Console-API/absmartly-web-console-api", - position: "left", - label: "Web Console API", + label: "APIs & SDKs", }, { to: "https://absmartly.com/blog", @@ -171,13 +164,17 @@ const config = { { title: "Documentation", items: [ + { + label: "Product Docs", + to: "/docs/get-started", + }, { label: "SDK Docs", - to: "/docs/SDK-Documentation", + to: "/docs/APIs-and-SDKs/SDK-Documentation", }, { label: "API Docs", - to: "/docs/SDK-API/absmartly-collector-api", + to: "/docs/APIs-and-SDKs/SDK-API/absmartly-collector-api", }, ], }, @@ -201,10 +198,6 @@ const config = { { title: "Social Media", items: [ - { - label: "Twitter", - href: "https://twitter.com/absmartly", - }, { label: "LinkedIn", href: "https://www.linkedin.com/company/absmartly", @@ -215,7 +208,7 @@ const config = { copyright: `© Copyright ${new Date().getFullYear()} ABsmartly B.V.`, logo: { src: "img/logo.svg", - alt: "The A B Smartly Logo", + alt: "The ABsmartly Logo", style: { width: "3rem" }, }, }, diff --git a/src/anchorRedirects.js b/src/anchorRedirects.js new file mode 100644 index 00000000..0bd68c56 --- /dev/null +++ b/src/anchorRedirects.js @@ -0,0 +1,70 @@ +// Client-side anchor redirects for URL structure changes +// This handles cases where anchor names changed or moved to different pages + +const ANCHOR_REDIRECTS = { + // NEW paths (after server redirect) - handle anchor transformations + '/docs/web-console-docs/experiments/creating-an-experiment': { + 'application': 'applications', + 'targeting-audiences': 'audiences', + 'error-control': '/docs/web-console-docs/experiments/setting-up-a-gst-experiment#error-control', + 'sample-size-calculation': '/docs/web-console-docs/experiments/setting-up-a-gst-experiment#what-is-the-experiment-duration-based-on', + 'type-of-analysis': '/docs/web-console-docs/experiments/overview#analysis-methods', + }, + '/docs/web-console-docs/feature-flags/creating-a-feature': { + 'feature-name': 'basics', + 'tracking-unit': 'audiences', + 'application': 'audiences', 
+ 'targeting-audiences': 'audiences', + }, + // OLD paths (for local testing and fallback) - redirect to new structure + '/docs/web-console-docs/creating-an-experiment': { + 'application': '/docs/web-console-docs/experiments/creating-an-experiment#applications', + 'targeting-audiences': '/docs/web-console-docs/experiments/creating-an-experiment#audiences', + 'metadata': '/docs/web-console-docs/experiments/creating-an-experiment#metadata', + 'experiment-name': '/docs/web-console-docs/experiments/creating-an-experiment#experiment-name', + 'tracking-unit': '/docs/web-console-docs/experiments/creating-an-experiment#tracking-unit', + 'variants': '/docs/web-console-docs/experiments/creating-an-experiment#variants', + 'metrics': '/docs/web-console-docs/experiments/creating-an-experiment#metrics', + 'error-control': '/docs/web-console-docs/experiments/setting-up-a-gst-experiment#error-control', + 'sample-size-calculation': '/docs/web-console-docs/experiments/setting-up-a-gst-experiment#what-is-the-experiment-duration-based-on', + 'type-of-analysis': '/docs/web-console-docs/experiments/overview#analysis-methods', + }, + '/docs/web-console-docs/creating-a-feature': { + 'feature-name': '/docs/web-console-docs/feature-flags/creating-a-feature#basics', + 'tracking-unit': '/docs/web-console-docs/feature-flags/creating-a-feature#audiences', + 'application': '/docs/web-console-docs/feature-flags/creating-a-feature#audiences', + 'targeting-audiences': '/docs/web-console-docs/feature-flags/creating-a-feature#audiences', + 'variants': '/docs/web-console-docs/feature-flags/creating-a-feature#variants', + 'metrics': '/docs/web-console-docs/feature-flags/creating-a-feature#metrics', + }, +}; + +function handleAnchorRedirect() { + const path = window.location.pathname.replace(/\/$/, ''); // Remove trailing slash + const hash = window.location.hash.slice(1); + + if (!hash) return; + + const redirects = ANCHOR_REDIRECTS[path]; + if (!redirects) return; + + const newDestination = 
redirects[hash]; + if (!newDestination) return; + + if (newDestination.startsWith('/')) { + window.location.replace(newDestination); + } else { + window.location.replace(`${path}#${newDestination}`); + } +} + +if (typeof window !== 'undefined') { + // Run on initial load + handleAnchorRedirect(); + + // Also run after client-side navigation (for SPAs like Docusaurus) + if (window.addEventListener) { + window.addEventListener('load', handleAnchorRedirect); + window.addEventListener('hashchange', handleAnchorRedirect); + } +} diff --git a/src/pages/index.tsx b/src/pages/index.tsx index aaec977e..adcaf92f 100644 --- a/src/pages/index.tsx +++ b/src/pages/index.tsx @@ -1,62 +1,11 @@ import React, { FC } from "react"; -import clsx from "clsx"; -import Link from "@docusaurus/Link"; -import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; -import Layout from "@theme/Layout"; -import HomepageFeatures from "@site/src/components/HomepageFeatures"; -import ThemedImage from "@theme/ThemedImage" -import styles from "./index.module.scss"; - -const HomepageHeader: FC<{}> = () => { - const { siteConfig } = useDocusaurusContext(); - - return ( -
-
- -

{siteConfig.title}

-

{siteConfig.tagline}

-
-
- - Read the Docs - - - Web Console Tutorial - -
-
-
- ); -}; const Home: FC<{}> = () => { - const { siteConfig } = useDocusaurusContext(); - return ( - - -
- -
-
- ); + React.useEffect(() => { + window.location.href = '/docs/get-started'; + }, []); + return null; }; export default Home; diff --git a/static/_redirects b/static/_redirects new file mode 100644 index 00000000..ef93a6f4 --- /dev/null +++ b/static/_redirects @@ -0,0 +1,14 @@ +# Netlify Redirects - Server-side 302 redirects for URL structure changes (temporary for testing) +# Format: from to [status-code] +# NOTE: Change to 301 (permanent) once verified all redirects work correctly +# Anchor redirects are handled client-side via JavaScript + +# Base path redirects (browser should preserve matching anchors) +# Note: Adding trailing slashes to match Docusaurus URL normalization +/docs/web-console-docs/creating-an-experiment /docs/web-console-docs/experiments/creating-an-experiment/ 302 +/docs/web-console-docs/creating-a-feature /docs/web-console-docs/feature-flags/creating-a-feature/ 302 +/docs/web-console-docs/setting-up-a-gst-experiment /docs/web-console-docs/experiments/setting-up-a-fixed-horizon-experiment/ 302 +/docs/web-console-docs/Experiment-reports /docs/web-console-docs/experiments/Experiment-reports/ 302 +/docs/web-console-docs/understanding-experimentation-metrics/* /docs/web-console-docs/goals-and-metrics/metrics/overview/ 302 +/docs/web-console-docs/settings /docs/web-console-docs/Configuration/settings/ 302 +/docs/web-console-docs/type-of-analysis /docs/web-console-docs/experiments/overview/#analysis-methods 302 diff --git a/static/img/configuration/applications.png b/static/img/configuration/applications.png new file mode 100644 index 00000000..d56367d4 Binary files /dev/null and b/static/img/configuration/applications.png differ diff --git a/static/img/configuration/goals.png b/static/img/configuration/goals.png new file mode 100644 index 00000000..eac3bcbf Binary files /dev/null and b/static/img/configuration/goals.png differ diff --git a/static/img/configuration/new-user.png b/static/img/configuration/new-user.png new file mode 100644 index 
00000000..339fe6d2 Binary files /dev/null and b/static/img/configuration/new-user.png differ diff --git a/static/img/configuration/units.png b/static/img/configuration/units.png new file mode 100644 index 00000000..112eb7e1 Binary files /dev/null and b/static/img/configuration/units.png differ diff --git a/static/img/events/events-page-exposures.png b/static/img/events/events-page-exposures.png new file mode 100644 index 00000000..973c300f Binary files /dev/null and b/static/img/events/events-page-exposures.png differ diff --git a/static/img/events/events-page-goals.png b/static/img/events/events-page-goals.png new file mode 100644 index 00000000..f58aff5e Binary files /dev/null and b/static/img/events/events-page-goals.png differ diff --git a/static/img/events/events-page.png b/static/img/events/events-page.png new file mode 100644 index 00000000..3da98844 Binary files /dev/null and b/static/img/events/events-page.png differ diff --git a/static/img/events/export-events-page.png b/static/img/events/export-events-page.png new file mode 100644 index 00000000..041b0169 Binary files /dev/null and b/static/img/events/export-events-page.png differ diff --git a/static/img/events/goal-event-detail.png b/static/img/events/goal-event-detail.png new file mode 100644 index 00000000..0ace52f0 Binary files /dev/null and b/static/img/events/goal-event-detail.png differ diff --git a/static/img/events_list.png b/static/img/events_list.png deleted file mode 100644 index 3dec5d2c..00000000 Binary files a/static/img/events_list.png and /dev/null differ diff --git a/static/img/gst-setup.png b/static/img/experiment-create/gst-setup.png similarity index 100% rename from static/img/gst-setup.png rename to static/img/experiment-create/gst-setup.png diff --git a/static/img/experiment-create/metric-selection.png b/static/img/experiment-create/metric-selection.png new file mode 100644 index 00000000..49882e97 Binary files /dev/null and b/static/img/experiment-create/metric-selection.png 
differ diff --git a/static/img/experiment-results/gst-results.png b/static/img/experiment-results/gst-results.png new file mode 100644 index 00000000..86e5efc2 Binary files /dev/null and b/static/img/experiment-results/gst-results.png differ diff --git a/static/img/gst-widget.png b/static/img/gst-widget.png deleted file mode 100644 index e5887260..00000000 Binary files a/static/img/gst-widget.png and /dev/null differ diff --git a/static/img/gst/gst-setup.png b/static/img/gst/gst-setup.png deleted file mode 100644 index 0ec2ca4a..00000000 Binary files a/static/img/gst/gst-setup.png and /dev/null differ diff --git a/static/img/gst/gst-widget.png b/static/img/gst/gst-widget.png deleted file mode 100644 index e5887260..00000000 Binary files a/static/img/gst/gst-widget.png and /dev/null differ diff --git a/static/img/metrics/goal-relation-cancellation.png b/static/img/metrics/goal-relation-cancellation.png new file mode 100644 index 00000000..75c6fe04 Binary files /dev/null and b/static/img/metrics/goal-relation-cancellation.png differ diff --git a/static/img/metrics/goal-relation-refund.png b/static/img/metrics/goal-relation-refund.png new file mode 100644 index 00000000..dc501230 Binary files /dev/null and b/static/img/metrics/goal-relation-refund.png differ diff --git a/static/img/metrics/goal-relation-replacement.png b/static/img/metrics/goal-relation-replacement.png new file mode 100644 index 00000000..dde62e0d Binary files /dev/null and b/static/img/metrics/goal-relation-replacement.png differ diff --git a/static/img/metrics/gross-conversion-rate.png b/static/img/metrics/gross-conversion-rate.png new file mode 100644 index 00000000..3c196823 Binary files /dev/null and b/static/img/metrics/gross-conversion-rate.png differ diff --git a/static/img/metrics/lower-alert-threshold.png b/static/img/metrics/lower-alert-threshold.png new file mode 100644 index 00000000..76819e0b Binary files /dev/null and b/static/img/metrics/lower-alert-threshold.png differ diff --git 
a/static/img/metrics/metric-view.png b/static/img/metrics/metric-view.png new file mode 100644 index 00000000..20531bef Binary files /dev/null and b/static/img/metrics/metric-view.png differ