diff --git a/README.md b/README.md
deleted file mode 100644
index 56d3d70b6675..000000000000
--- a/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-
-
-
-
-[Apache HBase](https://hbase.apache.org) is an open-source, distributed, versioned, column-oriented store modeled after Google's [Bigtable](https://research.google.com/archive/bigtable.html): A Distributed Storage System for Structured Data by Chang et al. Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of [Apache Hadoop](https://hadoop.apache.org/).
-
-# Getting Started
-To get started using HBase, the full documentation for this release can be found under the docs/ directory that accompanies this README. Using a browser, open docs/index.html to view the project home page (or browse https://hbase.apache.org). The HBase '[book](https://hbase.apache.org/book.html)' has a 'quick start' section and is where you should begin your exploration of the HBase project.
-
-The latest HBase can be downloaded from the [download page](https://hbase.apache.org/downloads.html).
-
-We use mailing lists for announcements and discussion. The mailing lists and archives are listed [here](http://hbase.apache.org/mail-lists.html).
-
-We use the #hbase channel on the official [ASF Slack Workspace](https://the-asf.slack.com/) for real time questions and discussions. Please mail dev@hbase.apache.org to request an invite.
-
-# How to Contribute
-The source code can be found at https://hbase.apache.org/source-repository.html
-
-The HBase issue tracker is at https://hbase.apache.org/issue-tracking.html
-
-Note that public registration for https://issues.apache.org/ has been disabled due to spam. If you want to contribute to HBase, please visit the [Request a jira account](https://selfserve.apache.org/jira-account.html) page to submit your request. Please make sure to select **hbase** as the '_ASF project you want to file a ticket_' so we can receive your request and process it.
-
-> **_NOTE:_** we need to process the requests manually, so it may take some time, for example up to a week, for us to respond to your request.
-
-# About
-Apache HBase is made available under the [Apache License, version 2.0](https://hbase.apache.org/license.html)
-
-The HBase distribution includes cryptographic software. See the export control notice [here](https://hbase.apache.org/export_control.html).
diff --git a/hbase-website/.gitignore b/hbase-website/.gitignore
index 2888d6734ed0..eeb7d01c1709 100644
--- a/hbase-website/.gitignore
+++ b/hbase-website/.gitignore
@@ -34,3 +34,10 @@ lerna-debug.log*
# Generated files
/app/pages/team/developers.json
+# Playwright
+node_modules/
+/test-results/
+/playwright-report/
+/blob-report/
+/playwright/.cache/
+/playwright/.auth/
diff --git a/hbase-website/.source/index.ts b/hbase-website/.source/index.ts
new file mode 100644
index 000000000000..eb23d9be856e
--- /dev/null
+++ b/hbase-website/.source/index.ts
@@ -0,0 +1,21 @@
+///
+import { fromConfig } from 'fumadocs-mdx/runtime/vite';
+import type * as Config from '../source.config';
+
+export const create = fromConfig<typeof Config>();
+
+export const docs = {
+ doc: create.doc("docs", "app/pages/_docs/docs/_mdx", import.meta.glob(["./**/*.mdx"], {
+ "query": {
+ "collection": "docs"
+ },
+ "base": "./../app/pages/_docs/docs/_mdx"
+ })),
+ meta: create.meta("docs", "app/pages/_docs/docs/_mdx", import.meta.glob(["./**/*.{json,yaml}"], {
+ "import": "default",
+ "base": "./../app/pages/_docs/docs/_mdx",
+ "query": {
+ "collection": "docs"
+ }
+ }))
+};
\ No newline at end of file
diff --git a/hbase-website/.vite/deps/_metadata.json b/hbase-website/.vite/deps/_metadata.json
new file mode 100644
index 000000000000..b32dd284744f
--- /dev/null
+++ b/hbase-website/.vite/deps/_metadata.json
@@ -0,0 +1,8 @@
+{
+ "hash": "c3d4a621",
+ "configHash": "fe8bf0bb",
+ "lockfileHash": "f94e92f2",
+ "browserHash": "0081a6ff",
+ "optimized": {},
+ "chunks": {}
+}
\ No newline at end of file
diff --git a/hbase-website/.vite/deps/package.json b/hbase-website/.vite/deps/package.json
new file mode 100644
index 000000000000..3dbc1ca591c0
--- /dev/null
+++ b/hbase-website/.vite/deps/package.json
@@ -0,0 +1,3 @@
+{
+ "type": "module"
+}
diff --git a/hbase-website/app/app.css b/hbase-website/app/app.css
index 30bf9ef10d7e..91b1f11a904a 100644
--- a/hbase-website/app/app.css
+++ b/hbase-website/app/app.css
@@ -18,7 +18,12 @@
@import "tailwindcss";
@import "tw-animate-css";
-@import "highlight.js/styles/github.css";
+@import 'fumadocs-ui/css/shadcn.css';
+@import 'fumadocs-ui/css/preset.css';
+
+@plugin "@tailwindcss/typography" {
+ className: prose-original;
+}
@theme {
}
@@ -179,33 +184,12 @@
}
}
-/* Code syntax highlighting for dark mode */
-.dark pre code.hljs {
- background: oklch(0.2 0 0);
- color: oklch(0.85 0 0);
-}
-
-.dark .hljs-comment {
- color: oklch(0.55 0 0);
-}
-
-.dark .hljs-keyword {
- color: oklch(0.75 0.12 340);
-}
-
-.dark .hljs-string {
- color: oklch(0.75 0.1 140);
-}
-
-.dark .hljs-number {
- color: oklch(0.75 0.1 100);
-}
-
-.dark .hljs-title {
- color: oklch(0.75 0.12 260);
-}
-
-.dark .hljs-name,
-.dark .hljs-attribute {
- color: oklch(0.7 0.12 200);
+/* For rendering a PDF */
+@media print {
+ #nd-docs-layout {
+ --fd-sidebar-width: 0px !important;
+ }
+ #nd-sidebar {
+ display: none;
+ }
}
diff --git a/hbase-website/app/components/docs/layout/docs/client.tsx b/hbase-website/app/components/docs/layout/docs/client.tsx
new file mode 100644
index 000000000000..367a3f548b4f
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/docs/client.tsx
@@ -0,0 +1,157 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import {
+ type ComponentProps,
+ createContext,
+ type ReactNode,
+ use,
+ useEffect,
+ useMemo,
+ useState
+} from "react";
+import { useSidebar } from "../sidebar/base";
+import { usePathname } from "fumadocs-core/framework";
+import Link from "fumadocs-core/link";
+import type { SidebarTab } from "../sidebar/tabs";
+import { isTabActive } from "../sidebar/tabs/dropdown";
+import { cn } from "@/lib/utils";
+
+export const LayoutContext = createContext<{
+ isNavTransparent: boolean;
+} | null>(null);
+
+export function LayoutContextProvider({
+ navTransparentMode = "none",
+ children
+}: {
+ navTransparentMode?: "always" | "top" | "none";
+ children: ReactNode;
+}) {
+ const isTop = useIsScrollTop({ enabled: navTransparentMode === "top" }) ?? true;
+ const isNavTransparent = navTransparentMode === "top" ? isTop : navTransparentMode === "always";
+
+ return (
+    <LayoutContext.Provider
+      value={useMemo(
+        () => ({
+          isNavTransparent
+        }),
+        [isNavTransparent]
+      )}
+    >
+      {children}
+    </LayoutContext.Provider>
+ );
+}
+
+export function LayoutHeader(props: ComponentProps<"header">) {
+ const { isNavTransparent } = use(LayoutContext)!;
+
+ return (
+
+ {props.children}
+
+ );
+}
+
+export function LayoutBody({ className, style, children, ...props }: ComponentProps<"div">) {
+ const { collapsed } = useSidebar();
+
+ return (
+
+ );
+}
+
+export { PageLastUpdate, PageBreadcrumb } from "./client";
diff --git a/hbase-website/app/components/docs/layout/docs/sidebar.tsx b/hbase-website/app/components/docs/layout/docs/sidebar.tsx
new file mode 100644
index 000000000000..a8879202a129
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/docs/sidebar.tsx
@@ -0,0 +1,264 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import * as Base from "../sidebar/base";
+import { type ComponentProps, useRef } from "react";
+import { cva } from "class-variance-authority";
+import { createPageTreeRenderer } from "../sidebar/page-tree";
+import { createLinkItemRenderer } from "../sidebar/link-item";
+import { buttonVariants } from "../../../../ui/button";
+import { SearchToggle } from "../search-toggle";
+import { Sidebar as SidebarIcon } from "lucide-react";
+import { cn, mergeRefs } from "@/lib/utils";
+
+const itemVariants = cva(
+ "relative flex flex-row items-center gap-2 rounded-lg p-2 text-start text-fd-muted-foreground wrap-anywhere [&_svg]:size-4 [&_svg]:shrink-0",
+ {
+ variants: {
+ variant: {
+ link: "transition-colors hover:bg-fd-accent/50 hover:text-fd-accent-foreground/80 hover:transition-none data-[active=true]:bg-fd-primary/10 data-[active=true]:text-fd-primary data-[active=true]:hover:transition-colors",
+ button:
+ "transition-colors hover:bg-fd-accent/50 hover:text-fd-accent-foreground/80 hover:transition-none"
+ },
+ highlight: {
+ true: "data-[active=true]:before:content-[''] data-[active=true]:before:bg-fd-primary data-[active=true]:before:absolute data-[active=true]:before:w-px data-[active=true]:before:inset-y-2.5 data-[active=true]:before:start-2.5"
+ }
+ }
+ }
+);
+
+function getItemOffset(depth: number) {
+ return `calc(${2 + 3 * depth} * var(--spacing))`;
+}
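+// For reference: with the default Tailwind spacing unit this yields
+// calc(2 * var(--spacing)) at depth 0, calc(5 * var(--spacing)) at depth 1,
+// and so on, i.e. each nesting level indents items by three spacing units.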
+
+export {
+ SidebarProvider as Sidebar,
+ SidebarFolder,
+ SidebarCollapseTrigger,
+ SidebarViewport,
+ SidebarTrigger
+} from "../sidebar/base";
+
+export function SidebarContent({
+ ref: refProp,
+ className,
+ children,
+ isSearchToggleEnabled = true,
+ ...props
+}: ComponentProps<"aside"> & { isSearchToggleEnabled?: boolean }) {
+ const ref = useRef(null);
+
+ return (
+
+ {({ collapsed, hovered, ref: asideRef, ...rest }) => (
+ <>
+
+ {collapsed && }
+
+
+
+
+
+
+ {isSearchToggleEnabled && }
+
+ >
+ )}
+
+ );
+}
+
+export function SidebarDrawer({
+ children,
+ className,
+ ...props
+}: ComponentProps) {
+ return (
+ <>
+
+
+ {children}
+
+ >
+ );
+}
+
+export function SidebarSeparator({ className, style, children, ...props }: ComponentProps<"p">) {
+ const depth = Base.useFolderDepth();
+
+ return (
+
+ {children}
+
+ );
+}
+
+export function SidebarItem({
+ className,
+ style,
+ children,
+ ...props
+}: ComponentProps) {
+ const depth = Base.useFolderDepth();
+
+ return (
+ = 1 }), className)}
+ style={{
+ paddingInlineStart: getItemOffset(depth),
+ ...style
+ }}
+ {...props}
+ >
+ {children}
+
+ );
+}
+
+export function SidebarFolderTrigger({
+ className,
+ style,
+ ...props
+}: ComponentProps) {
+ const { depth, collapsible } = Base.useFolder()!;
+
+ return (
+
+ {props.children}
+
+ );
+}
+
+export function SidebarFolderLink({
+ className,
+ style,
+ ...props
+}: ComponentProps) {
+ const depth = Base.useFolderDepth();
+
+ return (
+ 1 }), "w-full", className)}
+ style={{
+ paddingInlineStart: getItemOffset(depth - 1),
+ ...style
+ }}
+ {...props}
+ >
+ {props.children}
+
+ );
+}
+
+export function SidebarFolderContent({
+ className,
+ children,
+ ...props
+}: ComponentProps) {
+ const depth = Base.useFolderDepth();
+
+ return (
+
+ {children}
+
+ );
+}
+
+export const SidebarPageTree = createPageTreeRenderer({
+ SidebarFolder: Base.SidebarFolder,
+ SidebarFolderContent,
+ SidebarFolderLink,
+ SidebarFolderTrigger,
+ SidebarItem,
+ SidebarSeparator
+});
+
+export const SidebarLinkItem = createLinkItemRenderer({
+ SidebarFolder: Base.SidebarFolder,
+ SidebarFolderContent,
+ SidebarFolderLink,
+ SidebarFolderTrigger,
+ SidebarItem
+});
diff --git a/hbase-website/app/components/docs/layout/language-toggle.tsx b/hbase-website/app/components/docs/layout/language-toggle.tsx
new file mode 100644
index 000000000000..0427f62d9702
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/language-toggle.tsx
@@ -0,0 +1,77 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type { ComponentProps } from "react";
+import { useI18n } from "fumadocs-ui/contexts/i18n";
+import { Popover, PopoverContent, PopoverTrigger } from "../../../ui/popover";
+import { buttonVariants } from "../../../ui/button";
+import { cn } from "@/lib/utils";
+
+export type LanguageSelectProps = ComponentProps<"button">;
+
+export function LanguageToggle(props: LanguageSelectProps): React.ReactElement {
+ const context = useI18n();
+ if (!context.locales) throw new Error("Missing ``");
+
+ return (
+
+
+ {props.children}
+
+
+
+ {context.text.chooseLanguage}
+
+ {context.locales.map((item) => (
+
+ ))}
+
+
+ );
+}
+
+export function LanguageToggleText(props: ComponentProps<"span">) {
+ const context = useI18n();
+ const text = context.locales?.find((item) => item.locale === context.locale)?.name;
+
+ return {text};
+}
diff --git a/hbase-website/app/components/docs/layout/link-item.tsx b/hbase-website/app/components/docs/layout/link-item.tsx
new file mode 100644
index 000000000000..132ef1ffa4c2
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/link-item.tsx
@@ -0,0 +1,128 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type { ComponentProps, ReactNode } from "react";
+import { usePathname } from "fumadocs-core/framework";
+import { isActive } from "../../../lib/urls";
+import Link from "fumadocs-core/link";
+
+interface Filterable {
+ /**
+ * Restrict where the item is displayed
+ *
+ * @defaultValue 'all'
+ */
+ on?: "menu" | "nav" | "all";
+}
+
+interface WithHref {
+ url: string;
+ /**
+ * When the item is marked as active
+ *
+ * @defaultValue 'url'
+ */
+ active?: "url" | "nested-url" | "none";
+ external?: boolean;
+}
+
+export interface MainItemType extends WithHref, Filterable {
+ type?: "main";
+ icon?: ReactNode;
+ text: ReactNode;
+ description?: ReactNode;
+}
+
+export interface IconItemType extends WithHref, Filterable {
+ type: "icon";
+ /**
+ * `aria-label` of icon button
+ */
+ label?: string;
+ icon: ReactNode;
+ text: ReactNode;
+ /**
+ * @defaultValue true
+ */
+ secondary?: boolean;
+}
+
+export interface ButtonItemType extends WithHref, Filterable {
+ type: "button";
+ icon?: ReactNode;
+ text: ReactNode;
+ /**
+ * @defaultValue false
+ */
+ secondary?: boolean;
+}
+
+export interface MenuItemType extends Partial<WithHref>, Filterable {
+ type: "menu";
+ icon?: ReactNode;
+ text: ReactNode;
+
+ items: (
+ | (MainItemType & {
+ /**
+ * Options when displayed on navigation menu
+ */
+ menu?: ComponentProps<"a"> & {
+ banner?: ReactNode;
+ };
+ })
+ | CustomItemType
+ )[];
+
+ /**
+ * @defaultValue false
+ */
+ secondary?: boolean;
+}
+
+export interface CustomItemType extends Filterable {
+ type: "custom";
+ /**
+ * @defaultValue false
+ */
+ secondary?: boolean;
+ children: ReactNode;
+}
+
+export type LinkItemType =
+ | MainItemType
+ | IconItemType
+ | ButtonItemType
+ | MenuItemType
+ | CustomItemType;
+
+export function LinkItem({
+ ref,
+ item,
+ ...props
+}: Omit<ComponentProps<typeof Link>, "href"> & { item: WithHref }) {
+ const pathname = usePathname();
+ const activeType = item.active ?? "url";
+ const active = activeType !== "none" && isActive(item.url, pathname, activeType === "nested-url");
+
+ return (
+
+ {props.children}
+
+ );
+}
diff --git a/hbase-website/app/components/docs/layout/search-toggle.tsx b/hbase-website/app/components/docs/layout/search-toggle.tsx
new file mode 100644
index 000000000000..2ed63d1f6dbc
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/search-toggle.tsx
@@ -0,0 +1,94 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type { ComponentProps } from "react";
+import { Search } from "lucide-react";
+import { useSearchContext } from "fumadocs-ui/contexts/search";
+import { useI18n } from "fumadocs-ui/contexts/i18n";
+import { type ButtonProps, buttonVariants } from "../../../ui/button";
+import { cn } from "@/lib/utils";
+
+interface SearchToggleProps extends Omit<ComponentProps<"button">, "color">, ButtonProps {
+ hideIfDisabled?: boolean;
+}
+
+export function SearchToggle({
+ hideIfDisabled,
+ size = "icon-sm",
+ variant = "ghost",
+ ...props
+}: SearchToggleProps) {
+ const { setOpenSearch, enabled } = useSearchContext();
+ if (hideIfDisabled && !enabled) return null;
+
+ return (
+
+ );
+}
+
+export function LargeSearchToggle({
+ hideIfDisabled,
+ ...props
+}: ComponentProps<"button"> & {
+ hideIfDisabled?: boolean;
+}) {
+ const { enabled, hotKey, setOpenSearch } = useSearchContext();
+ const { text } = useI18n();
+ if (hideIfDisabled && !enabled) return null;
+
+ return (
+
+ );
+}
diff --git a/hbase-website/app/components/docs/layout/shared.tsx b/hbase-website/app/components/docs/layout/shared.tsx
new file mode 100644
index 000000000000..130221692a5b
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/shared.tsx
@@ -0,0 +1,120 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type { ComponentProps, ReactNode } from "react";
+import type { I18nConfig } from "fumadocs-core/i18n";
+import type { LinkItemType } from "./link-item";
+import Link from "fumadocs-core/link";
+
+export interface NavOptions {
+ enabled: boolean;
+ component: ReactNode;
+
+ title?: ReactNode | ((props: ComponentProps<"a">) => ReactNode);
+
+ /**
+ * Redirect url of title
+ * @defaultValue '/'
+ */
+ url?: string;
+
+ /**
+ * Use transparent background
+ *
+ * @defaultValue none
+ */
+ transparentMode?: "always" | "top" | "none";
+
+ children?: ReactNode;
+}
+
+export interface BaseLayoutProps {
+ themeSwitch?: {
+ enabled?: boolean;
+ component?: ReactNode;
+ mode?: "light-dark" | "light-dark-system";
+ };
+
+ searchToggle?: Partial<{
+ enabled: boolean;
+ components: Partial<{
+ sm: ReactNode;
+ lg: ReactNode;
+ }>;
+ }>;
+
+ /**
+ * I18n options
+ *
+ * @defaultValue false
+ */
+ i18n?: boolean | I18nConfig;
+
+ /**
+ * GitHub url
+ */
+ githubUrl?: string;
+
+ links?: LinkItemType[];
+ /**
+ * Replace or disable navbar
+ */
+  nav?: Partial<NavOptions>;
+
+ children?: ReactNode;
+}
+
+/**
+ * Get link items with shortcuts
+ */
+export function resolveLinkItems({
+ links = [],
+ githubUrl
+}: Pick<BaseLayoutProps, "links" | "githubUrl">): LinkItemType[] {
+ const result = [...links];
+
+ if (githubUrl)
+ result.push({
+ type: "icon",
+ url: githubUrl,
+ text: "Github",
+ label: "GitHub",
+ icon: (
+
+ ),
+ external: true
+ });
+
+ return result;
+}
+
+export function renderTitleNav(
+  { title, url = "/" }: Partial<NavOptions>,
+ props: ComponentProps<"a">
+) {
+ if (typeof title === "function") return title({ href: url, ...props });
+ return (
+
+ {title}
+
+ );
+}
+
+export type * from "./link-item";
diff --git a/hbase-website/app/components/docs/layout/sidebar/base.tsx b/hbase-website/app/components/docs/layout/sidebar/base.tsx
new file mode 100644
index 000000000000..bad26c16fe8d
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/sidebar/base.tsx
@@ -0,0 +1,423 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { ChevronDown, ExternalLink } from "lucide-react";
+import {
+ type ComponentProps,
+ createContext,
+ type PointerEvent,
+ type ReactNode,
+ type RefObject,
+ use,
+ useEffect,
+ useMemo,
+ useRef,
+ useState
+} from "react";
+import Link, { type LinkProps } from "fumadocs-core/link";
+import { useOnChange } from "fumadocs-core/utils/use-on-change";
+import { ScrollArea, type ScrollAreaProps, ScrollViewport } from "../../../../ui/scroll-area";
+import { isActive } from "../../../../lib/urls";
+import {
+ Collapsible,
+ CollapsibleContent,
+ type CollapsibleContentProps,
+ CollapsibleTrigger,
+ type CollapsibleTriggerProps
+} from "../../../../ui/collapsible";
+import { useMediaQuery } from "fumadocs-core/utils/use-media-query";
+import { Presence } from "@radix-ui/react-presence";
+import scrollIntoView from "scroll-into-view-if-needed";
+import { usePathname } from "fumadocs-core/framework";
+import { cn } from "@/lib/utils";
+
+interface SidebarContext {
+ open: boolean;
+  setOpen: React.Dispatch<React.SetStateAction<boolean>>;
+  collapsed: boolean;
+  setCollapsed: React.Dispatch<React.SetStateAction<boolean>>;
+
+ /**
+ * When set to false, don't close the sidebar when navigate to another page
+ */
+  closeOnRedirect: RefObject<boolean>;
+ defaultOpenLevel: number;
+ prefetch?: boolean;
+ mode: Mode;
+}
+
+export interface SidebarProviderProps {
+ /**
+ * Open folders by default if their level is lower or equal to a specific level
+ * (Starting from 1)
+ *
+ * @defaultValue 0
+ */
+ defaultOpenLevel?: number;
+
+ /**
+ * Prefetch links, default behaviour depends on your React.js framework.
+ */
+ prefetch?: boolean;
+
+ children?: ReactNode;
+}
+
+type Mode = "drawer" | "full";
+
+const SidebarContext = createContext<SidebarContext | null>(null);
+
+const FolderContext = createContext<{
+ open: boolean;
+  setOpen: React.Dispatch<React.SetStateAction<boolean>>;
+ depth: number;
+ collapsible: boolean;
+} | null>(null);
+
+export function SidebarProvider({
+ defaultOpenLevel = 0,
+ prefetch,
+ children
+}: SidebarProviderProps) {
+ const closeOnRedirect = useRef(true);
+ const [open, setOpen] = useState(false);
+ const [collapsed, setCollapsed] = useState(false);
+ const pathname = usePathname();
+ const mode: Mode = useMediaQuery("(width < 768px)") ? "drawer" : "full";
+
+ useOnChange(pathname, () => {
+ if (closeOnRedirect.current) {
+ setOpen(false);
+ }
+ closeOnRedirect.current = true;
+ });
+
+ return (
+    <SidebarContext.Provider
+      value={useMemo(
+        () => ({
+          open,
+          setOpen,
+          collapsed,
+          setCollapsed,
+          closeOnRedirect,
+          defaultOpenLevel,
+          prefetch,
+          mode
+        }),
+        [open, collapsed, defaultOpenLevel, prefetch, mode]
+      )}
+    >
+      {children}
+    </SidebarContext.Provider>
+ );
+}
+
+export function useSidebar(): SidebarContext {
+ const ctx = use(SidebarContext);
+ if (!ctx)
+ throw new Error(
+ "Missing SidebarContext, make sure you have wrapped the component in and the context is available."
+ );
+
+ return ctx;
+}
+
+export function useFolder() {
+ return use(FolderContext);
+}
+
+export function useFolderDepth() {
+ return use(FolderContext)?.depth ?? 0;
+}
+
+export function SidebarContent({
+ children
+}: {
+ children: (state: {
+ ref: RefObject;
+ collapsed: boolean;
+ hovered: boolean;
+ onPointerEnter: (event: PointerEvent) => void;
+ onPointerLeave: (event: PointerEvent) => void;
+ }) => ReactNode;
+}) {
+ const { collapsed, mode } = useSidebar();
+ const [hover, setHover] = useState(false);
+ const ref = useRef(null);
+ const timerRef = useRef(0);
+
+ useOnChange(collapsed, () => {
+ if (collapsed) setHover(false);
+ });
+
+ if (mode !== "full") return;
+
+ function shouldIgnoreHover(e: PointerEvent): boolean {
+ const element = ref.current;
+ if (!element) return true;
+
+ return !collapsed || e.pointerType === "touch" || element.getAnimations().length > 0;
+ }
+
+ return children({
+ ref,
+ collapsed,
+ hovered: hover,
+ onPointerEnter(e) {
+ if (shouldIgnoreHover(e)) return;
+ window.clearTimeout(timerRef.current);
+ setHover(true);
+ },
+ onPointerLeave(e) {
+ if (shouldIgnoreHover(e)) return;
+ window.clearTimeout(timerRef.current);
+
+ timerRef.current = window.setTimeout(
+ () => setHover(false),
+ // if mouse is leaving the viewport, add a close delay
+ Math.min(e.clientX, document.body.clientWidth - e.clientX) > 100 ? 0 : 500
+ );
+ }
+ });
+}
+
+export function SidebarDrawerOverlay(props: ComponentProps<"div">) {
+ const { open, setOpen, mode } = useSidebar();
+
+ if (mode !== "drawer") return;
+ return (
+
+
+  );
+}
+
+export function SidebarFolderLink({ children, ...props }: LinkProps) {
+ const ref = useRef(null);
+ const { open, setOpen, collapsible } = use(FolderContext)!;
+ const { prefetch } = useSidebar();
+ const pathname = usePathname();
+ const active = props.href !== undefined && isActive(props.href, pathname, false);
+
+ useAutoScroll(active, ref);
+
+ return (
+ {
+ if (!collapsible) return;
+
+ if (e.target instanceof Element && e.target.matches("[data-icon], [data-icon] *")) {
+ setOpen(!open);
+ e.preventDefault();
+ } else {
+ setOpen(active ? !open : true);
+ }
+ }}
+ prefetch={prefetch}
+ {...props}
+ >
+ {children}
+ {collapsible && (
+
+ )}
+
+ );
+}
+
+export function SidebarFolderContent(props: CollapsibleContentProps) {
+ return {props.children};
+}
+
+export function SidebarTrigger({ children, ...props }: ComponentProps<"button">) {
+ const { setOpen } = useSidebar();
+
+ return (
+
+ );
+}
+
+export function SidebarCollapseTrigger(props: ComponentProps<"button">) {
+ const { collapsed, setCollapsed } = useSidebar();
+
+ return (
+
+ );
+}
+
+/**
+ * scroll to the element if `active` is true
+ */
+export function useAutoScroll(active: boolean, ref: RefObject) {
+ const { mode } = useSidebar();
+
+ useEffect(() => {
+ if (active && ref.current) {
+ scrollIntoView(ref.current, {
+ boundary: document.getElementById(mode === "drawer" ? "nd-sidebar-mobile" : "nd-sidebar"),
+ scrollMode: "if-needed"
+ });
+ }
+ }, [active, mode, ref]);
+}
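+
+// Usage note (from the code above): SidebarFolderLink passes the ref of its
+// rendered link together with its active state, so the currently active entry
+// is scrolled into view inside #nd-sidebar (or #nd-sidebar-mobile in drawer
+// mode) without scrolling the rest of the page.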
diff --git a/hbase-website/app/components/docs/layout/sidebar/link-item.tsx b/hbase-website/app/components/docs/layout/sidebar/link-item.tsx
new file mode 100644
index 000000000000..eace0fec4087
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/sidebar/link-item.tsx
@@ -0,0 +1,78 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type { HTMLAttributes } from "react";
+import type * as Base from "./base";
+import type { LinkItemType } from "../link-item";
+
+type InternalComponents = Pick<
+ typeof Base,
+ | "SidebarFolder"
+ | "SidebarFolderLink"
+ | "SidebarFolderContent"
+ | "SidebarFolderTrigger"
+ | "SidebarItem"
+>;
+
+export function createLinkItemRenderer({
+ SidebarFolder,
+ SidebarFolderContent,
+ SidebarFolderLink,
+ SidebarFolderTrigger,
+ SidebarItem
+}: InternalComponents) {
+ /**
+ * Render sidebar items from page tree
+ */
+ return function SidebarLinkItem({
+ item,
+ ...props
+ }: HTMLAttributes & {
+ item: Exclude;
+ }) {
+    if (item.type === "custom") return {item.children};
+
+ if (item.type === "menu")
+ return (
+
+ {item.url ? (
+
+ {item.icon}
+ {item.text}
+
+ ) : (
+
+ {item.icon}
+ {item.text}
+
+ )}
+
+ {item.items.map((child, i) => (
+
+ ))}
+
+
+ );
+
+ return (
+
+ {item.text}
+
+ );
+ };
+}
diff --git a/hbase-website/app/components/docs/layout/sidebar/page-tree.tsx b/hbase-website/app/components/docs/layout/sidebar/page-tree.tsx
new file mode 100644
index 000000000000..f56ccfe1069b
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/sidebar/page-tree.tsx
@@ -0,0 +1,109 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { useTreeContext, useTreePath } from "fumadocs-ui/contexts/tree";
+import { type FC, type ReactNode, useMemo, Fragment } from "react";
+import type * as PageTree from "fumadocs-core/page-tree";
+import type * as Base from "./base";
+
+export interface SidebarPageTreeComponents {
+ Item: FC<{ item: PageTree.Item }>;
+ Folder: FC<{ item: PageTree.Folder; children: ReactNode }>;
+ Separator: FC<{ item: PageTree.Separator }>;
+}
+
+type InternalComponents = Pick<
+ typeof Base,
+ | "SidebarSeparator"
+ | "SidebarFolder"
+ | "SidebarFolderLink"
+ | "SidebarFolderContent"
+ | "SidebarFolderTrigger"
+ | "SidebarItem"
+>;
+
+export function createPageTreeRenderer({
+ SidebarFolder,
+ SidebarFolderContent,
+ SidebarFolderLink,
+ SidebarFolderTrigger,
+ SidebarSeparator,
+ SidebarItem
+}: InternalComponents) {
+ function PageTreeFolder({ item, children }: { item: PageTree.Folder; children: ReactNode }) {
+ const path = useTreePath();
+
+ return (
+
+ {item.index ? (
+
+ {item.icon}
+ {item.name}
+
+ ) : (
+
+ {item.icon}
+ {item.name}
+
+ )}
+ {children}
+
+ );
+ }
+
+ /**
+ * Render sidebar items from page tree
+ */
+ return function SidebarPageTree(components: Partial) {
+ const { root } = useTreeContext();
+ const { Separator, Item, Folder = PageTreeFolder } = components;
+
+ return useMemo(() => {
+ function renderSidebarList(items: PageTree.Node[]) {
+ return items.map((item, i) => {
+ if (item.type === "separator") {
+          if (Separator) return <Separator key={i} item={item} />;
+ return (
+
+ {item.icon}
+ {item.name}
+
+ );
+ }
+
+ if (item.type === "folder") {
+ return (
+
+ {renderSidebarList(item.children)}
+
+ );
+ }
+
+        if (Item) return <Item key={item.url} item={item} />;
+ return (
+
+ {item.name}
+
+ );
+ });
+ }
+
+      return <Fragment>{renderSidebarList(root.children)}</Fragment>;
+ }, [Folder, Item, Separator, root]);
+ };
+}
diff --git a/hbase-website/app/components/docs/layout/sidebar/tabs/dropdown.tsx b/hbase-website/app/components/docs/layout/sidebar/tabs/dropdown.tsx
new file mode 100644
index 000000000000..79b263ee5150
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/sidebar/tabs/dropdown.tsx
@@ -0,0 +1,124 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { Check, ChevronsUpDown } from "lucide-react";
+import { type ComponentProps, type ReactNode, useMemo, useState } from "react";
+import Link from "fumadocs-core/link";
+import { usePathname } from "fumadocs-core/framework";
+import { isActive, normalize } from "@/lib/urls";
+import { useSidebar } from "../base";
+import { Popover, PopoverContent, PopoverTrigger } from "@/ui/popover";
+import type { SidebarTab } from "./index";
+import { cn } from "@/lib/utils";
+
+export interface SidebarTabWithProps extends SidebarTab {
+ props?: ComponentProps<"a">;
+}
+
+export function SidebarTabsDropdown({
+ options,
+ placeholder,
+ ...props
+}: {
+ placeholder?: ReactNode;
+ options: SidebarTabWithProps[];
+} & ComponentProps<"button">) {
+ const [open, setOpen] = useState(false);
+ const { closeOnRedirect } = useSidebar();
+ const pathname = usePathname();
+
+ const selected = useMemo(() => {
+ return options.findLast((item) => isTabActive(item, pathname));
+ }, [options, pathname]);
+
+ const onClick = () => {
+ closeOnRedirect.current = false;
+ setOpen(false);
+ };
+
+ const item = selected ? (
+ <>
+
+
+
+
+ );
+ })}
+
+
+ );
+}
+
+export function isTabActive(tab: SidebarTab, pathname: string) {
+ if (tab.urls) return tab.urls.has(normalize(pathname));
+
+ return isActive(tab.url, pathname, true);
+}
diff --git a/hbase-website/app/components/docs/layout/sidebar/tabs/index.tsx b/hbase-website/app/components/docs/layout/sidebar/tabs/index.tsx
new file mode 100644
index 000000000000..1aa5b5747623
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/sidebar/tabs/index.tsx
@@ -0,0 +1,101 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type * as PageTree from "fumadocs-core/page-tree";
+import type { ReactNode } from "react";
+
+export interface SidebarTab {
+ /**
+ * Redirect URL of the folder, usually the index page
+ */
+ url: string;
+
+ icon?: ReactNode;
+ title: ReactNode;
+ description?: ReactNode;
+
+ /**
+ * Detect from a list of urls
+ */
+  urls?: Set<string>;
+ unlisted?: boolean;
+}
+
+export interface GetSidebarTabsOptions {
+ transform?: (option: SidebarTab, node: PageTree.Folder) => SidebarTab | null;
+}
+
+const defaultTransform: GetSidebarTabsOptions["transform"] = (option, node) => {
+ if (!node.icon) return option;
+
+ return {
+ ...option,
+ icon: (
+
+ {node.icon}
+
+ )
+ };
+};
+
+export function getSidebarTabs(
+ tree: PageTree.Root,
+ { transform = defaultTransform }: GetSidebarTabsOptions = {}
+): SidebarTab[] {
+ const results: SidebarTab[] = [];
+
+ function scanOptions(node: PageTree.Root | PageTree.Folder, unlisted?: boolean) {
+ if ("root" in node && node.root) {
+ const urls = getFolderUrls(node);
+
+ if (urls.size > 0) {
+ const option: SidebarTab = {
+ url: urls.values().next().value ?? "",
+ title: node.name,
+ icon: node.icon,
+ unlisted,
+ description: node.description,
+ urls
+ };
+
+ const mapped = transform ? transform(option, node) : option;
+ if (mapped) results.push(mapped);
+ }
+ }
+
+ for (const child of node.children) {
+ if (child.type === "folder") scanOptions(child, unlisted);
+ }
+ }
+
+ scanOptions(tree);
+ if (tree.fallback) scanOptions(tree.fallback, true);
+
+ return results;
+}
+
+function getFolderUrls(folder: PageTree.Folder, output: Set<string> = new Set()): Set<string> {
+ if (folder.index) output.add(folder.index.url);
+
+ for (const child of folder.children) {
+ if (child.type === "page" && !child.external) output.add(child.url);
+ if (child.type === "folder") getFolderUrls(child, output);
+ }
+
+ return output;
+}
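+
+// Usage sketch (illustrative): dropdown tabs are derived from a loaded page
+// tree, e.g. `const tabs = getSidebarTabs(source.getPageTree());`, where
+// `source` is the fumadocs loader output used elsewhere in this change. Only
+// folders flagged as root folders contribute a tab, and each tab keeps the set
+// of page URLs under it so isTabActive() can match the current pathname.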
diff --git a/hbase-website/app/components/docs/layout/theme-toggle.tsx b/hbase-website/app/components/docs/layout/theme-toggle.tsx
new file mode 100644
index 000000000000..5fc18c38639d
--- /dev/null
+++ b/hbase-website/app/components/docs/layout/theme-toggle.tsx
@@ -0,0 +1,93 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { cn } from "@/lib/utils";
+import { cva } from "class-variance-authority";
+import { Airplay, Moon, Sun } from "lucide-react";
+import { useTheme } from "next-themes";
+import { type ComponentProps, useEffect, useState } from "react";
+
+const itemVariants = cva("size-6.5 rounded-full p-1.5 text-fd-muted-foreground", {
+ variants: {
+ active: {
+ true: "bg-fd-accent text-fd-accent-foreground",
+ false: "text-fd-muted-foreground"
+ }
+ }
+});
+
+const full = [["light", Sun] as const, ["dark", Moon] as const, ["system", Airplay] as const];
+
+export function ThemeToggle({
+ className,
+ mode = "light-dark",
+ ...props
+}: ComponentProps<"div"> & {
+ mode?: "light-dark" | "light-dark-system";
+}) {
+ const { setTheme, theme, resolvedTheme } = useTheme();
+ const [mounted, setMounted] = useState(false);
+
+ useEffect(() => {
+ setMounted(true);
+ }, []);
+
+ const container = cn("inline-flex items-center rounded-full border p-1", className);
+
+ if (mode === "light-dark") {
+ const value = mounted ? resolvedTheme : null;
+
+ return (
+
+ );
+ }
+
+ const value = mounted ? theme : null;
+
+ return (
+
+ {full.map(([key, Icon]) => (
+
+ ))}
+
+ );
+}
diff --git a/hbase-website/app/components/docs/search/create-db.ts b/hbase-website/app/components/docs/search/create-db.ts
new file mode 100644
index 000000000000..108def1e5e50
--- /dev/null
+++ b/hbase-website/app/components/docs/search/create-db.ts
@@ -0,0 +1,110 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import {
+ create,
+ insertMultiple,
+ type Orama,
+ type PartialSchemaDeep,
+ type TypedDocument
+} from "@orama/orama";
+import type { AdvancedOptions } from "fumadocs-core/search/server";
+
+export type AdvancedDocument = TypedDocument<Orama<typeof advancedSchema>>;
+export const advancedSchema = {
+ content: "string",
+ page_id: "string",
+ type: "string",
+ breadcrumbs: "string[]",
+ tags: "enum[]",
+ url: "string",
+ embeddings: "vector[512]"
+} as const;
+
+export async function createDB({
+ indexes,
+ tokenizer,
+ ...rest
+}: AdvancedOptions): Promise<Orama<typeof advancedSchema>> {
+ const items = typeof indexes === "function" ? await indexes() : indexes;
+
+ const db = create({
+ schema: advancedSchema,
+ ...rest,
+ components: {
+ ...rest.components,
+ tokenizer: tokenizer ?? rest.components?.tokenizer
+ }
+  }) as Orama<typeof advancedSchema>;
+
+  const mapTo: PartialSchemaDeep<AdvancedDocument>[] = [];
+ items.forEach((page) => {
+ const pageTag = page.tag ?? [];
+ const tags = Array.isArray(pageTag) ? pageTag : [pageTag];
+ const data = page.structuredData;
+ let id = 0;
+
+ mapTo.push({
+ id: page.id,
+ page_id: page.id,
+ type: "page",
+ content: page.title,
+ breadcrumbs: page.breadcrumbs,
+ tags,
+ url: page.url
+ });
+
+ const nextId = () => `${page.id}-${id++}`;
+
+ if (page.description) {
+ mapTo.push({
+ id: nextId(),
+ page_id: page.id,
+ tags,
+ type: "text",
+ url: page.url,
+ content: page.description
+ });
+ }
+
+ for (const heading of data.headings) {
+ mapTo.push({
+ id: nextId(),
+ page_id: page.id,
+ type: "heading",
+ tags,
+ url: `${page.url}#${heading.id}`,
+ content: heading.content
+ });
+ }
+
+ for (const content of data.contents) {
+ mapTo.push({
+ id: nextId(),
+ page_id: page.id,
+ tags,
+ type: "text",
+ url: content.heading ? `${page.url}#${content.heading}` : page.url,
+ content: content.content
+ });
+ }
+ });
+
+ await insertMultiple(db, mapTo);
+ return db;
+}
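+
+// Usage sketch (illustrative): `const db = await createDB({ indexes });`, where
+// `indexes` are the structured pages passed to the search server. The resulting
+// Orama instance holds one "page" document per page plus one "heading" document
+// per heading and one "text" document per content block, all sharing the same
+// page_id so search hits can later be grouped back by page.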
diff --git a/hbase-website/app/components/docs/search/create-from-source.ts b/hbase-website/app/components/docs/search/create-from-source.ts
new file mode 100644
index 000000000000..5ef4673d4ae5
--- /dev/null
+++ b/hbase-website/app/components/docs/search/create-from-source.ts
@@ -0,0 +1,136 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import {
+ type AdvancedIndex,
+ type AdvancedOptions,
+ createI18nSearchAPI,
+ type SearchAPI,
+ createSearchAPI
+} from "fumadocs-core/search/server";
+import { PathUtils } from "fumadocs-core/source";
+import type { Language } from "@orama/orama";
+import type { LoaderConfig, LoaderOutput, Page } from "fumadocs-core/source";
+import type { I18nConfig } from "fumadocs-core/i18n";
+import { findPath } from "fumadocs-core/page-tree";
+import type { StructuredData } from "fumadocs-core/mdx-plugins";
+
+type Awaitable<T> = T | Promise<T>;
+
+function defaultBuildIndex(
+  source: LoaderOutput<LoaderConfig>,
+ tag?: (pageUrl: string) => string
+) {
+ function isBreadcrumbItem(item: unknown): item is string {
+ return typeof item === "string" && item.length > 0;
+ }
+
+  return async (page: Page): Promise<AdvancedIndex> => {
+ let breadcrumbs: string[] | undefined;
+ let structuredData: StructuredData | undefined;
+
+ if ("structuredData" in page.data) {
+ structuredData = page.data.structuredData as StructuredData;
+ } else if ("load" in page.data && typeof page.data.load === "function") {
+ structuredData = (await page.data.load()).structuredData;
+ }
+
+ if (!structuredData)
+ throw new Error(
+ "Cannot find structured data from page, please define the page to index function."
+ );
+
+ const pageTree = source.getPageTree(page.locale);
+ const path = findPath(
+ pageTree.children,
+ (node) => node.type === "page" && node.url === page.url
+ );
+ if (path) {
+ breadcrumbs = [];
+ path.pop();
+
+ if (isBreadcrumbItem(pageTree.name)) {
+ breadcrumbs.push(pageTree.name);
+ }
+
+ for (const segment of path) {
+ if (!isBreadcrumbItem(segment.name)) continue;
+
+ breadcrumbs.push(segment.name);
+ }
+ }
+
+ return {
+ title: page.data.title ?? PathUtils.basename(page.path, PathUtils.extname(page.path)),
+ breadcrumbs,
+ description: page.data.description,
+ url: page.url,
+ id: page.url,
+ structuredData,
+ tag: tag?.(page.url)
+ };
+ };
+}
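+// Note: `path.pop()` above removes the page's own node from the found path, so
+// `breadcrumbs` ends up as the page tree's root name (when it is a plain
+// string) followed by the names of the folders leading down to the page.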
+
+interface Options extends Omit {
+ localeMap?: {
+ [K in C["i18n"] extends I18nConfig ? Languages : string]?:
+ | Partial
+ | Language;
+ };
+  buildIndex?: (page: Page) => Awaitable<AdvancedIndex>;
+ tag?: (pageUrl: string) => string;
+}
+
+export function createFromSource(
+ source: LoaderOutput,
+ options?: Options
+): SearchAPI;
+
+export function createFromSource(
+ source: LoaderOutput,
+ options: Options = {}
+): SearchAPI {
+ const { buildIndex = defaultBuildIndex(source, options.tag) } = options;
+
+ if (source._i18n) {
+ return createI18nSearchAPI("advanced", {
+ ...options,
+ i18n: source._i18n,
+ indexes: async () => {
+ const indexes = source.getLanguages().flatMap((entry) => {
+ return entry.pages.map(async (page) => ({
+ ...(await buildIndex(page)),
+ locale: entry.language
+ }));
+ });
+
+ return Promise.all(indexes);
+ }
+ });
+ }
+
+ return createSearchAPI("advanced", {
+ ...options,
+ indexes: async () => {
+ const indexes = source.getPages().map((page) => buildIndex(page));
+
+ return Promise.all(indexes);
+ }
+ });
+}
diff --git a/hbase-website/app/components/docs/search/docs-search.tsx b/hbase-website/app/components/docs/search/docs-search.tsx
new file mode 100644
index 000000000000..fc6142217a88
--- /dev/null
+++ b/hbase-website/app/components/docs/search/docs-search.tsx
@@ -0,0 +1,78 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import {
+ SearchDialog as FumaDocsSearchDialog,
+ SearchDialogClose,
+ SearchDialogContent,
+ SearchDialogHeader,
+ SearchDialogIcon,
+ SearchDialogInput,
+ SearchDialogList,
+ SearchDialogOverlay,
+ type SharedProps
+} from "fumadocs-ui/components/dialog/search";
+import { useDocsSearch } from "./use-docs-search";
+import { create } from "@orama/orama";
+import { useI18n } from "fumadocs-ui/contexts/i18n";
+
+function initOrama() {
+ return create({
+ schema: { _: "string" },
+ language: "english"
+ });
+}
+
+export function SearchDialog(props: SharedProps) {
+ const { locale } = useI18n();
+
+ const { search, setSearch, query } = useDocsSearch({
+ type: "static",
+ initOrama,
+ locale,
+ tag: "multi-page"
+ });
+
+ return (
+
+
+
+
+
+
+
+
+ ({
+ ...i,
+ breadcrumbs: i.breadcrumbs?.filter((k) => k !== "Multi-Page Documentation")
+ }))
+ : null
+ }
+ />
+
+
+ );
+}
diff --git a/hbase-website/app/components/docs/search/static.ts b/hbase-website/app/components/docs/search/static.ts
new file mode 100644
index 000000000000..5be018e12b92
--- /dev/null
+++ b/hbase-website/app/components/docs/search/static.ts
@@ -0,0 +1,324 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import {
+ type AnyOrama,
+ create,
+ load,
+ type Orama,
+ type SearchParams,
+ search as searchOrama,
+ getByID
+} from "@orama/orama";
+import { type AdvancedDocument, type advancedSchema } from "./create-db";
+import { createContentHighlighter, type SortedResult } from "fumadocs-core/search";
+import type { ExportedData } from "fumadocs-core/search/server";
+import { removeUndefined } from "./utils";
+
+export interface StaticOptions {
+ /**
+ * Where to download exported search indexes (URL)
+ *
+ * @defaultValue '/api/search'
+ */
+ from?: string;
+
+  initOrama?: (locale?: string) => AnyOrama | Promise<AnyOrama>;
+
+ /**
+ * Filter results with specific tag(s).
+ */
+ tag?: string | string[];
+
+ /**
+ * Filter by locale (unsupported at the moment)
+ */
+ locale?: string;
+}
+
+const cache = new Map<string, Promise<Database>>();
+
+// locale -> db
+type Database = Map<
+ string,
+ {
+ db: AnyOrama;
+ }
+>;
+
+async function loadDB({
+ from = "/api/search",
+ initOrama = (locale) => create({ schema: { _: "string" }, language: locale })
+}: StaticOptions): Promise<Database> {
+ const cacheKey = from;
+ const cached = cache.get(cacheKey);
+ if (cached) return cached;
+
+ async function init() {
+ const res = await fetch(from);
+
+ if (!res.ok)
+ throw new Error(
+ `failed to fetch exported search indexes from ${from}, make sure the search database is exported and available for client.`
+ );
+
+ const data = (await res.json()) as ExportedData;
+ const dbs: Database = new Map();
+
+ if (data.type === "i18n") {
+ await Promise.all(
+ Object.entries(data.data).map(async ([k, v]) => {
+ const db = await initOrama(k);
+
+ load(db, v);
+ dbs.set(k, {
+ db
+ });
+ })
+ );
+
+ return dbs;
+ }
+
+ const db = await initOrama();
+ load(db, data);
+ dbs.set("", {
+ db
+ });
+ return dbs;
+ }
+
+ const result = init();
+ cache.set(cacheKey, result);
+ return result;
+}
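+// Note: the in-flight promise is cached per `from` URL, so concurrent searches
+// on the same page share a single fetch of the exported index instead of each
+// downloading and rebuilding the Orama database.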
+
+export async function search(query: string, options: StaticOptions) {
+ const { tag, locale } = options;
+
+ const db = (await loadDB(options)).get(locale ?? "");
+
+ if (!db) return [];
+
+  return searchAdvanced(db.db as Orama<typeof advancedSchema>, query, tag);
+}
+
+export async function searchAdvanced(
+  db: Orama<typeof advancedSchema>,
+ query: string,
+ tag: string | string[] = [],
+ {
+ mode = "fulltext",
+ ...override
+  }: Partial<SearchParams<Orama<typeof advancedSchema>, AdvancedDocument>> = {}
+): Promise<SortedResult[]> {
+ if (typeof tag === "string") tag = [tag];
+
+ let params = {
+ ...override,
+ mode,
+ where: removeUndefined({
+ tags:
+ tag.length > 0
+ ? {
+ containsAll: tag
+ }
+ : undefined,
+ ...override.where
+ }),
+ groupBy: {
+ properties: ["page_id"],
+ maxResult: 8,
+ ...override.groupBy
+ }
+  } as SearchParams<Orama<typeof advancedSchema>, AdvancedDocument>;
+
+ if (query.length > 0) {
+ params = {
+ ...params,
+ term: query,
+ properties: mode === "fulltext" ? ["content"] : ["content", "embeddings"]
+    } as SearchParams<Orama<typeof advancedSchema>, AdvancedDocument>;
+ }
+
+ const highlighter = createContentHighlighter(query);
+ const result = await searchOrama(db, params);
+
+ // Helper to detect phrase matches (3+ consecutive words)
+ const getPhraseMatchBoost = (content: string, searchTerm: string): number => {
+ const contentLower = content.toLowerCase();
+ const termLower = searchTerm.toLowerCase();
+
+ // Split search term into words
+ const searchWords = termLower.split(/\s+/).filter((w) => w.length > 0);
+
+ // Need at least 3 words for phrase matching
+ if (searchWords.length < 3) return 0;
+
+ // Check for longest consecutive word match
+ let maxConsecutiveMatch = 0;
+
+ for (let i = 0; i <= searchWords.length - 3; i++) {
+ // Try matching from 3 words up to all remaining words
+ for (let len = 3; len <= searchWords.length - i; len++) {
+ const phrase = searchWords.slice(i, i + len).join(" ");
+ if (contentLower.includes(phrase)) {
+ maxConsecutiveMatch = Math.max(maxConsecutiveMatch, len);
+ }
+ }
+ }
+
+ // Boost based on length of consecutive match
+ // Make this VERY high to dominate over heading matches
+ // 3 words: +10000, 4 words: +15000, 5+ words: +20000+
+ if (maxConsecutiveMatch >= 3) {
+ return 10000 + (maxConsecutiveMatch - 3) * 5000;
+ }
+
+ return 0;
+ };
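+  // Illustrative example (query and content are hypothetical): for the query
+  // "enable bucket cache offheap" and a hit containing "bucket cache offheap",
+  // the longest consecutive run is 3 words, giving a boost of 10000; if the hit
+  // contained all 4 query words in order, the boost would be 15000.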
+
+ // Helper to score match quality (exact > starts with > contains)
+ const getMatchQuality = (content: string, searchTerm: string): number => {
+ const lower = content.toLowerCase();
+ const term = searchTerm.toLowerCase();
+
+ if (lower === term) return 1000; // Exact match
+ if (lower.startsWith(term + " ")) return 500; // Starts with term + space
+ if (lower.startsWith(term)) return 400; // Starts with term
+ if (new RegExp(`\\b${term}\\b`, "i").test(content)) return 300; // Whole word
+ if (lower.includes(term)) return 100; // Contains
+ return 0;
+ };
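+  // Illustrative example (values are hypothetical): for the term "scan",
+  // "scan" scores 1000 (exact), "scan operations" 500 (starts with term + space),
+  // "scanner" 400 (starts with term), "table scan" 300 (whole word), and
+  // "rescanning" 100 (substring only).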
+
+ // Collect all groups with scoring
+ const groupsWithScores: Array<{
+ pageId: string;
+ pageScore: number;
+ matchQuality: number;
+ phraseBoost: number;
+ totalScore: number;
+ page: any;
+ hits: any[];
+ }> = [];
+
+ for (const item of result.groups ?? []) {
+ const pageId = item.values[0] as string;
+ const page = getByID(db, pageId);
+ if (!page) continue;
+
+ // Find the page hit to get its Orama score
+ const pageHit = item.result.find((hit: any) => hit.document.type === "page");
+ const pageScore = pageHit?.score || 0;
+
+ // Check for phrase matches in ALL hits (page title + all content sections)
+ // Use the BEST phrase match from any hit to boost the entire group
+ let bestPhraseBoost = 0;
+ let bestMatchQuality = 0;
+
+ for (const hit of item.result) {
+ const hitPhraseBoost = getPhraseMatchBoost(hit.document.content, query);
+ const hitMatchQuality = getMatchQuality(hit.document.content, query);
+
+ if (hitPhraseBoost > bestPhraseBoost) {
+ bestPhraseBoost = hitPhraseBoost;
+ }
+ if (hitMatchQuality > bestMatchQuality) {
+ bestMatchQuality = hitMatchQuality;
+ }
+ }
+
+ const totalScore = bestMatchQuality + bestPhraseBoost;
+
+ groupsWithScores.push({
+ pageId,
+ pageScore,
+ matchQuality: bestMatchQuality,
+ phraseBoost: bestPhraseBoost,
+ totalScore,
+ page,
+ hits: item.result
+ });
+ }
+
+ // Sort groups: phrase matches + exact matches first, then by Orama score
+ groupsWithScores.sort((a, b) => {
+ // Prioritize results with phrase matches and exact matches
+ if (a.totalScore !== b.totalScore) {
+ return b.totalScore - a.totalScore;
+ }
+ // Then by Orama relevance
+ return b.pageScore - a.pageScore;
+ });
+
+ const list: SortedResult[] = [];
+
+ // Build final list from sorted groups
+ for (const { pageId, page, hits } of groupsWithScores) {
+ // Add page title
+ list.push({
+ id: pageId,
+ type: "page",
+ content: page.content,
+ breadcrumbs: page.breadcrumbs,
+ contentWithHighlights: highlighter.highlight(page.content),
+ url: page.url
+ });
+
+ // Sort hits within this group: by phrase match + match quality, then type, then Orama score
+ const sortedHits = [...hits]
+ .filter((hit: any) => hit.document.type !== "page")
+ .map((hit: any) => {
+ const typeScore = hit.document.type === "heading" ? 2 : 1;
+ const matchQuality = getMatchQuality(hit.document.content, query);
+ const phraseBoost = getPhraseMatchBoost(hit.document.content, query);
+ const totalScore = matchQuality + phraseBoost;
+
+ return {
+ hit,
+ typeScore,
+ matchQuality,
+ phraseBoost,
+ totalScore
+ };
+ })
+ .sort((a, b) => {
+ // First prioritize phrase matches and exact matches (combined score)
+ if (a.totalScore !== b.totalScore) return b.totalScore - a.totalScore;
+ // Then by type (heading > text)
+ if (a.typeScore !== b.typeScore) return b.typeScore - a.typeScore;
+ // Then by Orama relevance
+ return b.hit.score - a.hit.score;
+ })
+ .map((item) => item.hit);
+
+ // Add sorted hits
+ for (const hit of sortedHits) {
+ list.push({
+ id: hit.document.id.toString(),
+ content: hit.document.content,
+ breadcrumbs: hit.document.breadcrumbs,
+ contentWithHighlights: highlighter.highlight(hit.document.content),
+ type: hit.document.type as SortedResult["type"],
+ url: hit.document.url
+ });
+ }
+ }
+
+ return list;
+}
diff --git a/hbase-website/app/components/docs/search/use-docs-search.ts b/hbase-website/app/components/docs/search/use-docs-search.ts
new file mode 100644
index 000000000000..9b85048a7810
--- /dev/null
+++ b/hbase-website/app/components/docs/search/use-docs-search.ts
@@ -0,0 +1,137 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { type DependencyList, useRef, useState } from "react";
+import { type StaticOptions } from "fumadocs-core/search/client";
+import type { SortedResult } from "fumadocs-core/search";
+import { useDebounce, useOnChange } from "./utils";
+
+interface UseDocsSearch {
+ search: string;
+ setSearch: (v: string) => void;
+ query: {
+ isLoading: boolean;
+ data?: SortedResult[] | "empty";
+ error?: Error;
+ };
+}
+
+export type Client = {
+ type: "static";
+} & StaticOptions;
+
+function isDeepEqual(a: unknown, b: unknown): boolean {
+ if (a === b) return true;
+
+ if (Array.isArray(a) && Array.isArray(b)) {
+ return b.length === a.length && a.every((v, i) => isDeepEqual(v, b[i]));
+ }
+
+ if (typeof a === "object" && a && typeof b === "object" && b) {
+ const aKeys = Object.keys(a);
+ const bKeys = Object.keys(b);
+
+ return (
+ aKeys.length === bKeys.length &&
+ aKeys.every(
+ (key) =>
+ Object.hasOwn(b, key) && isDeepEqual(a[key as keyof object], b[key as keyof object])
+ )
+ );
+ }
+
+ return false;
+}
+
+/**
+ * Provide a hook to query different official search clients.
+ *
+ * Note: it will re-query when its parameters change, so make sure to wrap `clientOptions` in `useMemo()` or define the `deps` array.
+ */
+export function useDocsSearch(
+ clientOptions: Client & {
+ /**
+ * The debounced delay for performing a search (in ms).
+ * @defaultValue 100
+ */
+ delayMs?: number;
+
+ /**
+ * Still perform the search even if the query is empty.
+ *
+ * @defaultValue false
+ */
+ allowEmpty?: boolean;
+ },
+ deps?: DependencyList
+): UseDocsSearch {
+ const { delayMs = 100, allowEmpty = false, ...client } = clientOptions;
+
+ const [search, setSearch] = useState("");
+ const [results, setResults] = useState<SortedResult[] | "empty">("empty");
+ const [error, setError] = useState<Error>();
+ const [isLoading, setIsLoading] = useState(false);
+ const debouncedValue = useDebounce(search, delayMs);
+ const onStart = useRef<() => void>(undefined);
+
+ useOnChange(
+ [deps ?? clientOptions, debouncedValue],
+ () => {
+ if (onStart.current) {
+ onStart.current();
+ onStart.current = undefined;
+ }
+
+ setIsLoading(true);
+ let interrupt = false;
+ onStart.current = () => {
+ interrupt = true;
+ };
+
+ async function run(): Promise<SortedResult[] | "empty"> {
+ if (debouncedValue.length === 0 && !allowEmpty) return "empty";
+ switch (client.type) {
+ case "static": {
+ const { search } = await import("./static");
+ return search(debouncedValue, client);
+ }
+ default:
+ throw new Error("unknown search client");
+ }
+ }
+
+ void run()
+ .then((res) => {
+ if (interrupt) return;
+
+ setError(undefined);
+ setResults(res);
+ })
+ .catch((err: Error) => {
+ setError(err);
+ })
+ .finally(() => {
+ setIsLoading(false);
+ });
+ },
+ deps ? undefined : (a, b) => !isDeepEqual(a, b)
+ );
+
+ return { search, setSearch, query: { isLoading, data: results, error } };
+}
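+
+// Example usage (sketch): wire a search input to the static client. The locale
+// value here is illustrative; memoize `clientOptions` or pass a `deps` array so
+// the hook does not re-query on every render.
+//
+//   const { search, setSearch, query } = useDocsSearch(
+//     { type: "static", locale: "en" },
+//     ["en"]
+//   );
+//   // query.isLoading while the debounced search runs; query.data stays
+//   // "empty" until a non-empty term is entered, then holds SortedResult[].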
diff --git a/hbase-website/app/components/docs/search/utils.ts b/hbase-website/app/components/docs/search/utils.ts
new file mode 100644
index 000000000000..04b58029c6e7
--- /dev/null
+++ b/hbase-website/app/components/docs/search/utils.ts
@@ -0,0 +1,83 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { useEffect, useState } from "react";
+
+export function removeUndefined<T>(value: T, deep = false): T {
+ const obj = value as Record<string, unknown>;
+
+ for (const key in obj) {
+ if (obj[key] === undefined) delete obj[key];
+ if (!deep) continue;
+
+ const entry = obj[key];
+
+ if (typeof entry === "object" && entry !== null) {
+ removeUndefined(entry, deep);
+ continue;
+ }
+
+ if (Array.isArray(entry)) {
+ for (const item of entry) removeUndefined(item, deep);
+ }
+ }
+
+ return value;
+}
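+
+// Illustrative behavior (shallow by default, recursive when `deep` is true):
+//   removeUndefined({ tags: undefined, where: { a: 1 } })  -> { where: { a: 1 } }
+//   removeUndefined({ where: { a: undefined } }, true)     -> { where: {} }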
+
+/**
+ * @param value - state to watch
+ * @param onChange - when the state changed
+ * @param isUpdated - a function that determines if the state is updated
+ */
+export function useOnChange<T>(
+ value: T,
+ onChange: (current: T, previous: T) => void,
+ isUpdated: (prev: T, current: T) => boolean = isDifferent
+): void {
+ const [prev, setPrev] = useState(value);
+
+ if (isUpdated(prev, value)) {
+ onChange(value, prev);
+ setPrev(value);
+ }
+}
+
+function isDifferent(a: unknown, b: unknown): boolean {
+ if (Array.isArray(a) && Array.isArray(b)) {
+ return b.length !== a.length || a.some((v, i) => isDifferent(v, b[i]));
+ }
+
+ return a !== b;
+}
+
+export function useDebounce<T>(value: T, delayMs = 1000): T {
+ const [debouncedValue, setDebouncedValue] = useState(value);
+
+ useEffect(() => {
+ if (delayMs === 0) return;
+ const handler = window.setTimeout(() => {
+ setDebouncedValue(value);
+ }, delayMs);
+
+ return () => clearTimeout(handler);
+ }, [delayMs, value]);
+
+ if (delayMs === 0) return value;
+ return debouncedValue;
+}
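+
+// Example (sketch): debounce a controlled input value and react once it
+// settles. `handleQuery` is a placeholder for the caller's own handler.
+//
+//   const [value, setValue] = useState("");
+//   const debounced = useDebounce(value, 100);
+//   useOnChange(debounced, (current) => handleQuery(current));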
diff --git a/hbase-website/app/components/docs/toc/clerk.tsx b/hbase-website/app/components/docs/toc/clerk.tsx
new file mode 100644
index 000000000000..4a8744e13562
--- /dev/null
+++ b/hbase-website/app/components/docs/toc/clerk.tsx
@@ -0,0 +1,182 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import * as Primitive from "fumadocs-core/toc";
+import { type ComponentProps, useEffect, useRef, useState } from "react";
+import { TocThumb, useTOCItems } from "./index";
+import { useI18n } from "fumadocs-ui/contexts/i18n";
+import { cn, mergeRefs } from "@/lib/utils";
+
+export function TOCItems({ ref, className, ...props }: ComponentProps<"div">) {
+ const containerRef = useRef(null);
+ const items = useTOCItems();
+ const { text } = useI18n();
+
+ const [svg, setSvg] = useState<{
+ path: string;
+ width: number;
+ height: number;
+ }>();
+
+ useEffect(() => {
+ if (!containerRef.current) return;
+ const container = containerRef.current;
+
+ function onResize(): void {
+ if (container.clientHeight === 0) return;
+ let w = 0,
+ h = 0;
+ const d: string[] = [];
+ for (let i = 0; i < items.length; i++) {
+ const element: HTMLElement | null = container.querySelector(
+ `a[href="#${items[i].url.slice(1)}"]`
+ );
+ if (!element) continue;
+
+ const styles = getComputedStyle(element);
+ const offset = getLineOffset(items[i].depth) + 1,
+ top = element.offsetTop + parseFloat(styles.paddingTop),
+ bottom = element.offsetTop + element.clientHeight - parseFloat(styles.paddingBottom);
+
+ w = Math.max(offset, w);
+ h = Math.max(h, bottom);
+
+ d.push(`${i === 0 ? "M" : "L"}${offset} ${top}`);
+ d.push(`L${offset} ${bottom}`);
+ }
+
+ setSvg({
+ path: d.join(" "),
+ width: w + 1,
+ height: h
+ });
+ }
+
+ const observer = new ResizeObserver(onResize);
+ onResize();
+
+ observer.observe(container);
+ return () => {
+ observer.disconnect();
+ };
+ }, [items]);
+
+ if (items.length === 0)
+ return (
+
+ {text.tocNoHeadings}
+
+ );
+
+ return (
+ <>
+ {svg && (
+
`
+ )
+ }")`
+ }}
+ >
+
+
+ )}
+
+ {items.map((item, i) => (
+
+ ))}
+
+ >
+ );
+}
+
+function getItemOffset(depth: number): number {
+ if (depth <= 2) return 14;
+ if (depth === 3) return 26;
+ return 36;
+}
+
+function getLineOffset(depth: number): number {
+ return depth >= 3 ? 10 : 0;
+}
+
+function TOCItem({
+ item,
+ upper = item.depth,
+ lower = item.depth
+}: {
+ item: Primitive.TOCItemType;
+ upper?: number;
+ lower?: number;
+}) {
+ const offset = getLineOffset(item.depth),
+ upperOffset = getLineOffset(upper),
+ lowerOffset = getLineOffset(lower);
+
+ return (
+
+ {offset !== upperOffset && (
+
+ )}
+
+ {item.title}
+
+ );
+}
diff --git a/hbase-website/app/components/docs/toc/default.tsx b/hbase-website/app/components/docs/toc/default.tsx
new file mode 100644
index 000000000000..a4630c0bb0f4
--- /dev/null
+++ b/hbase-website/app/components/docs/toc/default.tsx
@@ -0,0 +1,70 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { useI18n } from "fumadocs-ui/contexts/i18n";
+import { type ComponentProps, useRef } from "react";
+import { TocThumb, useTOCItems } from "./index";
+import * as Primitive from "fumadocs-core/toc";
+import { cn, mergeRefs } from "@/lib/utils";
+
+export function TOCItems({ ref, className, ...props }: ComponentProps<"div">) {
+ const containerRef = useRef(null);
+ const items = useTOCItems();
+ const { text } = useI18n();
+
+ if (items.length === 0)
+ return (
+
+ {text.tocNoHeadings}
+
+ );
+
+ return (
+ <>
+
+
+ {items.map((item) => (
+
+ ))}
+
+ >
+ );
+}
+
+function TOCItem({ item }: { item: Primitive.TOCItemType }) {
+ return (
+ = 4 && "ps-8"
+ )}
+ >
+ {item.title}
+
+ );
+}
diff --git a/hbase-website/app/components/docs/toc/index.tsx b/hbase-website/app/components/docs/toc/index.tsx
new file mode 100644
index 000000000000..238c086e09b8
--- /dev/null
+++ b/hbase-website/app/components/docs/toc/index.tsx
@@ -0,0 +1,133 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import * as Primitive from "fumadocs-core/toc";
+import {
+ type ComponentProps,
+ createContext,
+ type RefObject,
+ use,
+ useEffect,
+ useEffectEvent,
+ useRef
+} from "react";
+import { useOnChange } from "fumadocs-core/utils/use-on-change";
+import { cn, mergeRefs } from "@/lib/utils";
+
+const TOCContext = createContext<Primitive.TOCItemType[]>([]);
+
+export function useTOCItems(): Primitive.TOCItemType[] {
+ return use(TOCContext);
+}
+
+export function TOCProvider({
+ toc,
+ children,
+ ...props
+}: ComponentProps) {
+ return (
+
+
+ {children}
+
+
+ );
+}
+
+export function TOCScrollArea({ ref, className, ...props }: ComponentProps<"div">) {
+ const viewRef = useRef(null);
+
+ return (
+
+ {props.children}
+
+ );
+}
+
+type TocThumbType = [top: number, height: number];
+
+interface RefProps {
+ containerRef: RefObject;
+}
+
+export function TocThumb({ containerRef, ...props }: ComponentProps<"div"> & RefProps) {
+ const thumbRef = useRef(null);
+ const active = Primitive.useActiveAnchors();
+ function update(info: TocThumbType): void {
+ const element = thumbRef.current;
+ if (!element) return;
+ element.style.setProperty("--fd-top", `${info[0]}px`);
+ element.style.setProperty("--fd-height", `${info[1]}px`);
+ }
+
+ const onPrint = useEffectEvent(() => {
+ if (containerRef.current) {
+ update(calc(containerRef.current, active));
+ }
+ });
+
+ useEffect(() => {
+ if (!containerRef.current) return;
+ const container = containerRef.current;
+
+ const observer = new ResizeObserver(onPrint);
+ observer.observe(container);
+
+ return () => {
+ observer.disconnect();
+ };
+ }, [containerRef, onPrint]);
+
+ useOnChange(active, () => {
+ if (containerRef.current) {
+ update(calc(containerRef.current, active));
+ }
+ });
+
+ return ;
+}
+
+function calc(container: HTMLElement, active: string[]): TocThumbType {
+ if (active.length === 0 || container.clientHeight === 0) {
+ return [0, 0];
+ }
+
+ let upper = Number.MAX_VALUE,
+ lower = 0;
+
+ for (const item of active) {
+ const element = container.querySelector<HTMLElement>(`a[href="#${item}"]`);
+ if (!element) continue;
+
+ const styles = getComputedStyle(element);
+ upper = Math.min(upper, element.offsetTop + parseFloat(styles.paddingTop));
+ lower = Math.max(
+ lower,
+ element.offsetTop + element.clientHeight - parseFloat(styles.paddingBottom)
+ );
+ }
+
+ return [upper, lower - upper];
+}
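+
+// Illustrative result: if the links for the active anchors span offsets
+// 120px-180px inside the container (after padding), calc returns [120, 60],
+// which TocThumb applies above as --fd-top / --fd-height.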
diff --git a/hbase-website/app/components/links.ts b/hbase-website/app/components/links.ts
index c4fe3d008195..99db6310e9f3 100644
--- a/hbase-website/app/components/links.ts
+++ b/hbase-website/app/components/links.ts
@@ -69,7 +69,7 @@ export const projectLinks: LinkType[] = [
export const documentationLinks: (LinkType | NestedLinkType)[] = [
{
label: "Reference Guide",
- to: "https://hbase.apache.org/book.html"
+ to: "/docs"
},
{
label: "Reference Guide (PDF)",
diff --git a/hbase-website/app/components/markdown-layout.tsx b/hbase-website/app/components/markdown-layout.tsx
index f34886cc5533..3a99cb9bee4e 100644
--- a/hbase-website/app/components/markdown-layout.tsx
+++ b/hbase-website/app/components/markdown-layout.tsx
@@ -32,6 +32,7 @@ interface MarkdownLayoutProps {
components?: Components;
}
+// Deprecated
export function MarkdownLayout({
children,
autoLinkHeadings = false,
diff --git a/hbase-website/app/components/mdx-components.tsx b/hbase-website/app/components/mdx-components.tsx
new file mode 100644
index 000000000000..c9c9714c2707
--- /dev/null
+++ b/hbase-website/app/components/mdx-components.tsx
@@ -0,0 +1,122 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import type { MDXComponents } from "mdx/types";
+import { ExternalLinkIcon } from "lucide-react";
+import defaultMdxComponents from "fumadocs-ui/mdx";
+import { cn } from "@/lib/utils";
+import { Link } from "./link";
+
+export function getMDXComponents(overrides?: MDXComponents): MDXComponents {
+ return {
+ ...defaultMdxComponents,
+ h1: (props) => (
+
+ ),
+ h2: (props) => (
+
+ ),
+ h3: (props) => (
+
+ ),
+ p: (props) => ,
+ ol: (props) => ,
+ ul: (props) =>
+ );
+}
diff --git a/hbase-website/app/components/site-footer.tsx b/hbase-website/app/components/site-footer.tsx
index 4de15994420c..6dbbf88f5347 100644
--- a/hbase-website/app/components/site-footer.tsx
+++ b/hbase-website/app/components/site-footer.tsx
@@ -18,25 +18,7 @@
import { Link } from "@/components/link";
import { projectLinks, documentationLinks, asfLinks } from "./links";
-
-function ExternalIcon() {
- return (
-
- );
-}
+import { ExternalLink } from "lucide-react";
export function SiteFooter() {
return (
@@ -60,7 +42,7 @@ export function SiteFooter() {
className="hover:text-foreground inline-flex items-center"
>
{link.label}
- {link.external && }
+ {link.external && }
))}
@@ -78,7 +60,7 @@ export function SiteFooter() {
className="hover:text-foreground inline-flex items-center"
>
{link.label}
- {link.external && }
+ {link.external && }
) : (
@@ -90,7 +72,7 @@ export function SiteFooter() {
className="hover:text-foreground inline-flex items-center"
>
{link.label}
- {link.external && }
+ {link.external && }
))
@@ -109,7 +91,7 @@ export function SiteFooter() {
className="hover:text-foreground inline-flex items-center"
>
{link.label}
- {link.external && }
+ {link.external && }
))}
diff --git a/hbase-website/app/lib/source.ts b/hbase-website/app/lib/source.ts
new file mode 100644
index 000000000000..54746a49634e
--- /dev/null
+++ b/hbase-website/app/lib/source.ts
@@ -0,0 +1,25 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { loader } from "fumadocs-core/source";
+import { create, docs } from "@/.source";
+
+export const source = loader({
+ source: await create.sourceAsync(docs.doc, docs.meta),
+ baseUrl: "/docs"
+});
diff --git a/hbase-website/app/lib/urls.ts b/hbase-website/app/lib/urls.ts
new file mode 100644
index 000000000000..780b22b8b18b
--- /dev/null
+++ b/hbase-website/app/lib/urls.ts
@@ -0,0 +1,32 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+export function normalize(urlOrPath: string) {
+ if (urlOrPath.length > 1 && urlOrPath.endsWith("/")) return urlOrPath.slice(0, -1);
+ return urlOrPath;
+}
+
+/**
+ * @returns whether `href` matches the given pathname
+ */
+export function isActive(href: string, pathname: string, nested = true): boolean {
+ href = normalize(href);
+ pathname = normalize(pathname);
+
+ return href === pathname || (nested && pathname.startsWith(`${href}/`));
+}
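+
+// Illustrative behavior:
+//   isActive("/docs", "/docs")              -> true
+//   isActive("/docs", "/docs/architecture") -> true  (nested match)
+//   isActive("/docs", "/docs/", false)      -> true  (trailing slash normalized)
+//   isActive("/docs", "/documentation")     -> false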
diff --git a/hbase-website/app/lib/utils.ts b/hbase-website/app/lib/utils.ts
index 38ce31bca191..213918007b86 100644
--- a/hbase-website/app/lib/utils.ts
+++ b/hbase-website/app/lib/utils.ts
@@ -22,3 +22,17 @@ import { twMerge } from "tailwind-merge";
export function cn(...inputs: ClassValue[]) {
return twMerge(clsx(inputs));
}
+
+import type * as React from "react";
+
+export function mergeRefs<T>(...refs: (React.Ref<T> | undefined)[]): React.RefCallback<T> {
+ return (value) => {
+ refs.forEach((ref) => {
+ if (typeof ref === "function") {
+ ref(value);
+ } else if (ref) {
+ ref.current = value;
+ }
+ });
+ };
+}
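+
+// Example (sketch; `forwardedRef` is a placeholder name): combine a forwarded
+// ref with a locally-created one on the same element:
+//
+//   const localRef = useRef<HTMLDivElement>(null);
+//   return <div ref={mergeRefs(forwardedRef, localRef)} {...props} />;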
diff --git a/hbase-website/app/pages/_docs/docs-layout.tsx b/hbase-website/app/pages/_docs/docs-layout.tsx
new file mode 100644
index 000000000000..cadfd4b4a1ef
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs-layout.tsx
@@ -0,0 +1,33 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+import { RootProvider } from "fumadocs-ui/provider/react-router";
+import { Outlet } from "react-router";
+import { SearchDialog } from "@/components/docs/search/docs-search";
+
+export default function DocsLayout() {
+ return (
+
+
+
+ );
+}
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/acl-matrix.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/acl-matrix.mdx
new file mode 100644
index 000000000000..6202b0376436
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/acl-matrix.mdx
@@ -0,0 +1,154 @@
+---
+title: "Access Control Matrix"
+description: "The following matrix shows the permission set required to perform operations in HBase. Before using the table, read through the information about how to interpret it."
+---
+
+## Interpreting the ACL Matrix Table
+
+The following conventions are used in the ACL Matrix table:
+
+### Scopes
+
+Permissions are evaluated starting at the widest scope and working to the narrowest scope.
+
+A scope corresponds to a level of the data model. From broadest to narrowest, the scopes are as follows:
+
+#### Scopes
+
+- Global
+- Namespace (NS)
+- Table
+- Column Family (CF)
+- Column Qualifier (CQ)
+- Cell
+
+For instance, a permission granted at table level dominates any grants done at the Column Family, Column Qualifier, or cell level. The user can do what that grant implies at any location in the table. A permission granted at global scope dominates all: the user is always allowed to take that action everywhere.
+
+### Permissions
+
+Possible permissions include the following:
+
+#### Permissions
+
+- Superuser - a special user that belongs to group "supergroup" and has unlimited access
+- Admin (A)
+- Create (C)
+- Write (W)
+- Read (R)
+- Execute (X)
+
+For the most part, permissions work in an expected way, with the following caveats:
+
+**Having Write permission does not imply Read permission.**
+It is possible and sometimes desirable for a user to be able to write data that same user cannot read. One such example is a log-writing process.
+
+**The `hbase:meta` table is readable by every user, regardless of the user's other grants or restrictions.**
+This is a requirement for HBase to function correctly.
+
+**`CheckAndPut` and `CheckAndDelete` operations will fail if the user does not have both Write and Read permission.**
+
+**`Increment` and `Append` operations do not require Read access.**
+
+**The `superuser`, as the name suggests, has permissions to perform all possible operations.**
+
+**For operations marked with \*, the checks are done in a post hook and only the subset of results satisfying the access checks is returned to the user.**
+
+The following table is sorted by the interface that provides each operation.
+In case the table goes out of date, the unit tests which check for accuracy of permissions can be found in _hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java_, and the access controls themselves can be examined in _hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java_.
+
+## ACL Matrix
+
+| Interface | Operation | Permissions |
+| ---------------- | ----------------------------------- | --------------------------------------------------------------------------------------------------- |
+| Master | createTable | superuser\|global(C)\|NS(C) |
+| | modifyTable | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | deleteTable | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | truncateTable | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | addColumn | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | modifyColumn | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C)\|column(A)\|column(C) |
+| | deleteColumn | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C)\|column(A)\|column(C) |
+| | enableTable | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | disableTable | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | disableAclTable | Not allowed |
+| | move | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | assign | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | unassign | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | regionOffline | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | balance | superuser\|global(A) |
+| | balanceSwitch | superuser\|global(A) |
+| | shutdown | superuser\|global(A) |
+| | stopMaster | superuser\|global(A) |
+| | snapshot | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | listSnapshot | superuser\|global(A)\|SnapshotOwner |
+| | cloneSnapshot | superuser\|global(A)\|(SnapshotOwner & TableName matches) |
+| | restoreSnapshot | superuser\|global(A)\|SnapshotOwner & (NS(A)\|TableOwner\|table(A)) |
+| | deleteSnapshot | superuser\|global(A)\|SnapshotOwner |
+| | createNamespace | superuser\|global(A) |
+| | deleteNamespace | superuser\|global(A) |
+| | modifyNamespace | superuser\|global(A) |
+| | getNamespaceDescriptor | superuser\|global(A)\|NS(A) |
+| | listNamespaceDescriptors\* | superuser\|global(A)\|NS(A) |
+| | flushTable | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | getTableDescriptors\* | superuser\|global(A)\|global(C)\|NS(A)\|NS(C)\|TableOwner\|table(A)\|table(C) |
+| | getTableNames\* | superuser\|TableOwner\|Any global or table perm |
+| | setUserQuota(global level) | superuser\|global(A) |
+| | setUserQuota(namespace level) | superuser\|global(A) |
+| | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | setTableQuota | superuser\|global(A)\|NS(A)\|TableOwner\|table(A) |
+| | setNamespaceQuota | superuser\|global(A) |
+| | addReplicationPeer | superuser\|global(A) |
+| | removeReplicationPeer | superuser\|global(A) |
+| | enableReplicationPeer | superuser\|global(A) |
+| | disableReplicationPeer | superuser\|global(A) |
+| | getReplicationPeerConfig | superuser\|global(A) |
+| | updateReplicationPeerConfig | superuser\|global(A) |
+| | listReplicationPeers | superuser\|global(A) |
+| | getClusterStatus | any user |
+| Region | openRegion | superuser\|global(A) |
+| | closeRegion | superuser\|global(A) |
+| | flush | superuser\|global(A)\|global(C)\|TableOwner\|table(A)\|table(C) |
+| | split | superuser\|global(A)\|TableOwner\|table(A) |
+| | compact | superuser\|global(A)\|global(C)\|TableOwner\|table(A)\|table(C) |
+| | getClosestRowBefore | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | getOp | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | exists | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | put | superuser\|global(W)\|NS(W)\|table(W)\|TableOwner\|CF(W)\|CQ(W) |
+| | delete | superuser\|global(W)\|NS(W)\|table(W)\|TableOwner\|CF(W)\|CQ(W) |
+| | batchMutate | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W) |
+| | checkAndPut | superuser\|global(RW)\|NS(RW)\|TableOwner\|table(RW)\|CF(RW)\|CQ(RW) |
+| | checkAndPutAfterRowLock | superuser\|global(R)\|NS(R)\|TableOwner\|Table(R)\|CF(R)\|CQ(R) |
+| | checkAndDelete | superuser\|global(RW)\|NS(RW)\|TableOwner\|table(RW)\|CF(RW)\|CQ(RW) |
+| | checkAndDeleteAfterRowLock | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | incrementColumnValue | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W) |
+| | append | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W) |
+| | appendAfterRowLock | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W) |
+| | increment | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W) |
+| | incrementAfterRowLock | superuser\|global(W)\|NS(W)\|TableOwner\|table(W)\|CF(W)\|CQ(W) |
+| | scannerOpen | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | scannerNext | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | scannerClose | superuser\|global(R)\|NS(R)\|TableOwner\|table(R)\|CF(R)\|CQ(R) |
+| | bulkLoadHFile | superuser\|global(C)\|TableOwner\|table(C)\|CF(C) |
+| | prepareBulkLoad | superuser\|global(C)\|TableOwner\|table(C)\|CF(C) |
+| | cleanupBulkLoad | superuser\|global(C)\|TableOwner\|table(C)\|CF(C) |
+| Endpoint | invoke | superuser\|global(X)\|NS(X)\|TableOwner\|table(X) |
+| AccessController | grant(global level) | global(A) |
+| | grant(namespace level) | global(A)\|NS(A) |
+| | grant(table level) | global(A)\|NS(A)\|TableOwner\|table(A)\|CF(A)\|CQ(A) |
+| | revoke(global level) | global(A) |
+| | revoke(namespace level) | global(A)\|NS(A) |
+| | revoke(table level) | global(A)\|NS(A)\|TableOwner\|table(A)\|CF(A)\|CQ(A) |
+| | getUserPermissions(global level) | global(A) |
+| | getUserPermissions(namespace level) | global(A)\|NS(A) |
+| | getUserPermissions(table level) | global(A)\|NS(A)\|TableOwner\|table(A)\|CF(A)\|CQ(A) |
+| | hasPermission(table level) | global(A)\|SelfUserCheck |
+| RegionServer | stopRegionServer | superuser\|global(A) |
+| | mergeRegions | superuser\|global(A) |
+| | rollWALWriterRequest | superuser\|global(A) |
+| | replicateLogEntries | superuser\|global(W) |
+| RSGroup | addRSGroup | superuser\|global(A) |
+| | balanceRSGroup | superuser\|global(A) |
+| | getRSGroupInfo | superuser\|global(A) |
+| | getRSGroupInfoOfTable | superuser\|global(A) |
+| | getRSGroupOfServer | superuser\|global(A) |
+| | listRSGroups | superuser\|global(A) |
+| | moveServers | superuser\|global(A) |
+| | moveServersAndTables | superuser\|global(A) |
+| | moveTables | superuser\|global(A) |
+| | removeRSGroup | superuser\|global(A) |
+| | removeServers | superuser\|global(A) |
diff --git a/src/main/asciidoc/_chapters/amv2.adoc b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/amv2.mdx
similarity index 57%
rename from src/main/asciidoc/_chapters/amv2.adoc
rename to hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/amv2.mdx
index 49841ce32557..d496ccb90dc1 100644
--- a/src/main/asciidoc/_chapters/amv2.adoc
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/amv2.mdx
@@ -1,48 +1,23 @@
-////
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-////
-[[amv2]]
-= AMv2 Description for Devs
-:doctype: book
-:numbered:
-:toc: left
-:icons: font
-:experimental:
-
-The AssignmentManager (AM) in HBase Master manages assignment of Regions over a cluster of RegionServers.
+---
+title: "AMv2 Description for Devs"
+description: "The AssignmentManager (AM) in HBase Master manages assignment of Regions over a cluster of RegionServers."
+---
The AMv2 project is a redo of Assignment in an attempt at addressing the root cause of many of our operational issues in production, namely slow assignment and problematic accounting such that Regions are misplaced stuck offline in the notorious _Regions-In-Transition (RIT)_ limbo state.
Below are notes for devs on key aspects of AMv2 in no particular order.
-== Background
+## Background [#amv2-background]
-Assignment in HBase 1.x has been problematic in operation. It is not hard to see why. Region state is kept at the other end of an RPC in ZooKeeper (Terminal states -- i.e. OPEN or CLOSED -- are published to the _hbase:meta_ table). In HBase-1.x.x, state has multiple writers with Master and RegionServers all able to make state edits concurrently (in _hbase:meta_ table and out on ZooKeeper). If clocks are awry or watchers missed, state changes can be skipped or overwritten. Locking of HBase Entities -- tables, regions -- is not comprehensive so a table operation -- disable/enable -- could clash with a region-level operation; a split or merge. Region state is distributed and hard to reason about and test. Assignment is slow in operation because each assign involves moving remote znodes through transitions. Cluster size tends to top out at a couple of hundred thousand regions; beyond this, cluster start/stop takes hours and is prone to corruption.
+Assignment in HBase 1.x has been problematic in operation. It is not hard to see why. Region state is kept at the other end of an RPC in ZooKeeper (Terminal states — i.e. OPEN or CLOSED — are published to the _hbase:meta_ table). In HBase-1.x.x, state has multiple writers with Master and RegionServers all able to make state edits concurrently (in _hbase:meta_ table and out on ZooKeeper). If clocks are awry or watchers missed, state changes can be skipped or overwritten. Locking of HBase Entities — tables, regions — is not comprehensive so a table operation — disable/enable — could clash with a region-level operation; a split or merge. Region state is distributed and hard to reason about and test. Assignment is slow in operation because each assign involves moving remote znodes through transitions. Cluster size tends to top out at a couple of hundred thousand regions; beyond this, cluster start/stop takes hours and is prone to corruption.
-AMv2 (AssignmentManager Version 2) is a refactor (https://issues.apache.org/jira/browse/HBASE-14350[HBASE-14350]) of the hbase-1.x AssignmentManager putting it up on a https://issues.apache.org/jira/browse/HBASE-12439[ProcedureV2 (HBASE-12439)] basis. ProcedureV2 (Pv2)__,__ is an awkwardly named system that allows describing and running multi-step state machines. It is performant and persists all state to a Store which is recoverable post crash. See the companion chapter on <>, to learn more about the ProcedureV2 system.
+AMv2 (AssignmentManager Version 2) is a refactor ([HBASE-14350](https://issues.apache.org/jira/browse/HBASE-14350)) of the hbase-1.x AssignmentManager putting it up on a [ProcedureV2 (HBASE-12439)](https://issues.apache.org/jira/browse/HBASE-12439) basis. ProcedureV2 (Pv2) is an awkwardly named system that allows describing and running multi-step state machines. It is performant and persists all state to a Store which is recoverable post crash. See the companion chapter on [Procedure Framework (Pv2)](/docs/pv2) to learn more about the ProcedureV2 system.
-In AMv2, all assignment, crash handling, splits and merges are recast as Procedures(v2). ZooKeeper is purged from the mix. As before, the final assignment state gets published to _hbase:meta_ for non-Master participants to read (all-clients) with intermediate state kept in the local Pv2 WAL-based ‘store’ but only the active Master, a single-writer, evolves state. The Master’s in-memory cluster image is the authority and if disagreement, RegionServers are forced to comply. Pv2 adds shared/exclusive locking of all core HBase Entities -- namespace, tables, and regions -- to ensure one actor at a time access and to prevent operations contending over resources (move/split, disable/assign, etc.).
+In AMv2, all assignment, crash handling, splits and merges are recast as Procedures(v2). ZooKeeper is purged from the mix. As before, the final assignment state gets published to _hbase:meta_ for non-Master participants to read (all-clients) with intermediate state kept in the local Pv2 WAL-based 'store' but only the active Master, a single-writer, evolves state. The Master's in-memory cluster image is the authority and if disagreement, RegionServers are forced to comply. Pv2 adds shared/exclusive locking of all core HBase Entities — namespace, tables, and regions — to ensure one actor at a time access and to prevent operations contending over resources (move/split, disable/assign, etc.).
This redo of AM atop of a purposed, performant state machine with all operations taking on the common Procedure form with a single state writer only moves our AM to a new level of resilience and scale.
-== New System
+## New System
Each Region Assign or Unassign of a Region is now a Procedure. A Move (Region) Procedure is a compound of Procedures; it is the running of an Unassign Procedure followed by an Assign Procedure. The Move Procedure spawns the Assign and Unassign in series and then waits on their completions.
@@ -50,108 +25,106 @@ And so on. ServerCrashProcedure spawns the WAL splitting tasks and then the reas
AMv2 Procedures are run by the Master in a ProcedureExecutor instance. All Procedures make use of utility provided by the Pv2 framework.
-For example, Procedures persist each state transition to the frameworks’ Procedure Store. The default implementation is done as a WAL kept on HDFS. On crash, we reopen the Store and rerun all WALs of Procedure transitions to put the Assignment State Machine back into the attitude it had just before crash. We then continue Procedure execution.
+For example, Procedures persist each state transition to the frameworks' Procedure Store. The default implementation is done as a WAL kept on HDFS. On crash, we reopen the Store and rerun all WALs of Procedure transitions to put the Assignment State Machine back into the attitude it had just before crash. We then continue Procedure execution.
In the new system, the Master is the Authority on all things Assign. Previous we were ambiguous; e.g. the RegionServer was in charge of Split operations. Master keeps an in-memory image of Region states and servers. If disagreement, the Master always prevails; at an extreme it will kill the RegionServer that is in disagreement.
-A new RegionStateStore class takes care of publishing the terminal Region state, whether OPEN or CLOSED, out to the _hbase:meta _table__.__
+A new RegionStateStore class takes care of publishing the terminal Region state, whether OPEN or CLOSED, out to the _hbase:meta_ table.
RegionServers now report their run version on Connection. This version is available inside the AM for use running migrating rolling restarts.
-== Procedures Detail
+## Procedures Detail
-=== Assign/Unassign
+### Assign/Unassign
Assign and Unassign subclass a common RegionTransitionProcedure. There can only be one RegionTransitionProcedure per region running at a time since the RTP instance takes a lock on the region. The RTP base Procedure has three steps; a store the procedure step (REGION_TRANSITION_QUEUE); a dispatch of the procedure open or close followed by a suspend waiting on the remote regionserver to report successful open or fail (REGION_TRANSITION_DISPATCH) or notification that the server fielding the request crashed; and finally registration of the successful open/close in hbase:meta (REGION_TRANSITION_FINISH).
Here is how the assign of a region 56f985a727afe80a184dac75fbf6860c looks in the logs. The assign was provoked by a Server Crash (Process ID 1176 or pid=1176 which when it is the parent of a procedure, it is identified as ppid=1176). The assign is pid=1179, the second region of the two being assigned by this Server Crash.
-[source]
-----
+```
2017-05-23 12:04:24,175 INFO [ProcExecWrkr-30] procedure2.ProcedureExecutor: Initialized subprocedures=[{pid=1178, ppid=1176, state=RUNNABLE:REGION_TRANSITION_QUEUE; AssignProcedure table=IntegrationTestBigLinkedList, region=bfd57f0b72fd3ca77e9d3c5e3ae48d76, target=ve0540.halxg.example.org,16020,1495525111232}, {pid=1179, ppid=1176, state=RUNNABLE:REGION_TRANSITION_QUEUE; AssignProcedure table=IntegrationTestBigLinkedList, region=56f985a727afe80a184dac75fbf6860c, target=ve0540.halxg.example.org,16020,1495525111232}]
-----
+```
-Next we start the assign by queuing (‘registering’) the Procedure with the framework.
+Next we start the assign by queuing ('registering') the Procedure with the framework.
-[source]
-----
+```
2017-05-23 12:04:24,241 INFO [ProcExecWrkr-30] assignment.AssignProcedure: Start pid=1179, ppid=1176, state=RUNNABLE:REGION_TRANSITION_QUEUE; AssignProcedure table=IntegrationTestBigLinkedList, region=56f985a727afe80a184dac75fbf6860c, target=ve0540.halxg.example.org,16020,1495525111232; rit=OFFLINE, location=ve0540.halxg.example.org,16020,1495525111232; forceNewPlan=false, retain=false
-----
+```
-Track the running of Procedures in logs by tracing their process id -- here pid=1179.
+Track the running of Procedures in logs by tracing their process id — here pid=1179.
Next we move to the dispatch phase where we update hbase:meta table setting the region state as OPENING on server ve540. We then dispatch an rpc to ve540 asking it to open the region. Thereafter we suspend the Assign until we get a message back from ve540 on whether it has opened the region successfully (or not).
-[source]
-----
+```
2017-05-23 12:04:24,494 INFO [ProcExecWrkr-38] assignment.RegionStateStore: pid=1179 updating hbase:meta row=IntegrationTestBigLinkedList,H\xE3@\x8D\x964\x9D\xDF\x8F@9\x0F\xC8\xCC\xC2,1495566261066.56f985a727afe80a184dac75fbf6860c., regionState=OPENING, regionLocation=ve0540.halxg.example.org,16020,1495525111232
2017-05-23 12:04:24,498 INFO [ProcExecWrkr-38] assignment.RegionTransitionProcedure: Dispatch pid=1179, ppid=1176, state=RUNNABLE:REGION_TRANSITION_DISPATCH; AssignProcedure table=IntegrationTestBigLinkedList, region=56f985a727afe80a184dac75fbf6860c, target=ve0540.halxg.example.org,16020,1495525111232; rit=OPENING, location=ve0540.halxg.example.org,16020,1495525111232
-----
+```
-Below we log the incoming report that the region opened successfully on ve540. The Procedure is woken up (you can tell it the procedure is running by the name of the thread, its a ProcedureExecutor thread, ProcExecWrkr-9). The woken up Procedure updates state in hbase:meta to denote the region as open on ve0540. It then reports finished and exits.
+Below we log the incoming report that the region opened successfully on ve540. The Procedure is woken up (you can tell the procedure is running by the name of the thread; it's a ProcedureExecutor thread, ProcExecWrkr-9). The woken-up Procedure updates state in hbase:meta to denote the region as open on ve0540. It then reports finished and exits.
-[source]
-----
-2017-05-23 12:04:26,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=46,queue=1,port=16000] assignment.RegionTransitionProcedure: Received report OPENED seqId=11984985, pid=1179, ppid=1176, state=RUNNABLE:REGION_TRANSITION_DISPATCH; AssignProcedure table=IntegrationTestBigLinkedList, region=56f985a727afe80a184dac75fbf6860c, target=ve0540.halxg.example.org,16020,1495525111232; rit=OPENING, location=ve0540.halxg.example.org,16020,1495525111232 2017-05-23 12:04:26,643 INFO [ProcExecWrkr-9] assignment.RegionStateStore: pid=1179 updating hbase:meta row=IntegrationTestBigLinkedList,H\xE3@\x8D\x964\x9D\xDF\x8F@9\x0F\xC8\xCC\xC2,1495566261066.56f985a727afe80a184dac75fbf6860c., regionState=OPEN, openSeqNum=11984985, regionLocation=ve0540.halxg.example.org,16020,1495525111232
+```
+2017-05-23 12:04:26,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=46,queue=1,port=16000] assignment.RegionTransitionProcedure: Received report OPENED seqId=11984985, pid=1179, ppid=1176, state=RUNNABLE:REGION_TRANSITION_DISPATCH; AssignProcedure table=IntegrationTestBigLinkedList, region=56f985a727afe80a184dac75fbf6860c, target=ve0540.halxg.example.org,16020,1495525111232; rit=OPENING, location=ve0540.halxg.example.org,16020,1495525111232
+2017-05-23 12:04:26,643 INFO [ProcExecWrkr-9] assignment.RegionStateStore: pid=1179 updating hbase:meta row=IntegrationTestBigLinkedList,H\xE3@\x8D\x964\x9D\xDF\x8F@9\x0F\xC8\xCC\xC2,1495566261066.56f985a727afe80a184dac75fbf6860c., regionState=OPEN, openSeqNum=11984985, regionLocation=ve0540.halxg.example.org,16020,1495525111232
2017-05-23 12:04:26,836 INFO [ProcExecWrkr-9] procedure2.ProcedureExecutor: Finish suprocedure pid=1179, ppid=1176, state=SUCCESS; AssignProcedure table=IntegrationTestBigLinkedList, region=56f985a727afe80a184dac75fbf6860c, target=ve0540.halxg.example.org,16020,1495525111232
-----
+```
+
Unassign looks similar given it is based on the base RegionTransitionProcedure. It has the same state transitions and does basically the same steps but with different state name (CLOSING, CLOSED).
Most other procedures are subclasses of a Pv2 StateMachine implementation. We have both Table and Region focused StateMachines types.
-== UI
+## UI [#amv2-ui]
-Along the top-bar on the Master, you can now find a ‘Procedures&Locks’ tab which takes you to a page that is ugly but useful. It dumps currently running procedures and framework locks. Look at this when you can’t figure what stuff is stuck; it will at least identify problematic procedures (take the pid and grep the logs…). Look for ROLLEDBACK or pids that have been RUNNING for a long time.
+Along the top-bar on the Master, you can now find a 'Procedures&Locks' tab which takes you to a page that is ugly but useful. It dumps currently running procedures and framework locks. Look at this when you can't figure what stuff is stuck; it will at least identify problematic procedures (take the pid and grep the logs...). Look for ROLLEDBACK or pids that have been RUNNING for a long time.
-== Logging
+## Logging
Procedures log their process ids as pid= and their parent ids (ppid=) everywhere. Work has been done so you can grep the pid and see history of a procedure operation.
-== Implementation Notes
+## Implementation Notes
In this section we note some idiosyncrasies of operation as an attempt at saving you some head-scratching.
-=== Region Transition RPC and RS Heartbeat can arrive at ~same time on Master
+### Region Transition RPC and RS Heartbeat can arrive at ~same time on Master
-Reporting Region Transition on a RegionServer is now a RPC distinct from RS heartbeating (‘RegionServerServices’ Service). An heartbeat and a status update can arrive at the Master at about the same time. The Master will update its internal state for a Region but this same state is checked when heartbeat processing. We may find the unexpected; i.e. a Region just reported as CLOSED so heartbeat is surprised to find region OPEN on the back of the RS report. In the new system, all slaves must cow to the Masters’ understanding of cluster state; the Master will kill/close any misaligned entities.
+Reporting Region Transition on a RegionServer is now an RPC distinct from RS heartbeating ('RegionServerServices' Service). A heartbeat and a status update can arrive at the Master at about the same time. The Master will update its internal state for a Region, but this same state is also checked during heartbeat processing. We may find the unexpected; i.e. a Region just reported as CLOSED, so the heartbeat is surprised to find the region OPEN on the back of the RS report. In the new system, all slaves must cow to the Master's understanding of cluster state; the Master will kill/close any misaligned entities.
To address the above, we added a lastUpdate for in-memory Master state. Let a region state have some vintage before we act on it (one second currently).
-=== Master as RegionServer or as RegionServer that just does system tables
+### Master as RegionServer or as RegionServer that just does system tables
AMv2 enforces current master branch default of HMaster carrying system tables only; i.e. the Master in an HBase cluster acts also as a RegionServer only it is the exclusive host for tables such as _hbase:meta_, _hbase:namespace_, etc., the core system tables. This is causing a couple of test failures as AMv1, though it is not supposed to, allows moving hbase:meta off Master while AMv2 does not.
-== New Configs
+## New Configs
-These configs all need doc on when you’d change them.
+These configs all need doc on when you'd change them.
-=== hbase.procedure.remote.dispatcher.threadpool.size
+#### hbase.procedure.remote.dispatcher.threadpool.size [!toc]
Defaults 128
-=== hbase.procedure.remote.dispatcher.delay.msec
+#### hbase.procedure.remote.dispatcher.delay.msec [!toc]
Default 150ms
-=== hbase.procedure.remote.dispatcher.max.queue.size
+#### hbase.procedure.remote.dispatcher.max.queue.size [!toc]
Default 32
-=== hbase.regionserver.rpc.startup.waittime
+#### hbase.regionserver.rpc.startup.waittime [!toc]
Default 60 seconds.
-== Tools
+## Tools [#amv2-tools]
HBASE-15592 Print Procedure WAL Content
-Patch in https://issues.apache.org/jira/browse/HBASE-18152[HBASE-18152] [AMv2] Corrupt Procedure WAL file; procedure data stored out of order https://issues.apache.org/jira/secure/attachment/12871066/reading_bad_wal.patch[https://issues.apache.org/jira/secure/attachment/12871066/reading_bad_wal.patch]
+Patch in [HBASE-18152](https://issues.apache.org/jira/browse/HBASE-18152) [AMv2] Corrupt Procedure WAL file; procedure data stored out of order https://issues.apache.org/jira/secure/attachment/12871066/reading_bad_wal.patch
-=== MasterProcedureSchedulerPerformanceEvaluation
+### MasterProcedureSchedulerPerformanceEvaluation
Tool to test performance of locks and queues in procedure scheduler independently from other framework components. Run this after any substantial changes in proc system. Prints nice output:
-----
+```text
******************************************
Time - addBack : 5.0600sec
Ops/sec - addBack : 1.9M
@@ -170,4 +143,4 @@ Threads : 10
Raw format for scripts
RESULT [num_ops=10000000, ops_type=both, num_table=5, regions_per_table=10, threads=10, num_yield=22025876, time_addback_ms=5060, time_poll_ms=19459]
-----
+```
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/bulk-loading.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/bulk-loading.mdx
new file mode 100644
index 000000000000..8c21b0b4540a
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/bulk-loading.mdx
@@ -0,0 +1,83 @@
+---
+title: "Bulk Loading"
+description: "Efficient methods for loading large datasets into HBase using MapReduce to generate HFiles and directly load them into the cluster."
+---
+
+## Overview [#bulk-loading-overview]
+
+HBase includes several methods of loading data into tables. The most straightforward method is to either use the `TableOutputFormat` class from a MapReduce job, or use the normal client APIs; however, these are not always the most efficient methods.
+
+The bulk load feature uses a MapReduce job to output table data in HBase's internal data format, and then directly load the generated StoreFiles into a running cluster. Using bulk load will use less CPU and network resources than loading via the HBase API.
+
+## Bulk Load Architecture
+
+The HBase bulk load process consists of two main steps.
+
+### Preparing data via a MapReduce job
+
+The first step of a bulk load is to generate HBase data files (StoreFiles) from a MapReduce job using `HFileOutputFormat2`. This output format writes out data in HBase's internal storage format so that they can be later loaded efficiently into the cluster.
+
+In order to function efficiently, `HFileOutputFormat2` must be configured such that each output HFile fits within a single region. In order to do this, jobs whose output will be bulk loaded into HBase use Hadoop's `TotalOrderPartitioner` class to partition the map output into disjoint ranges of the key space, corresponding to the key ranges of the regions in the table.
+
+`HFileOutputFormat2` includes a convenience function, `configureIncrementalLoad()`, which automatically sets up a `TotalOrderPartitioner` based on the current region boundaries of a table.
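+
+As a rough sketch only (the mapper class and HDFS paths below are hypothetical placeholders, not part of the original text), a job that prepares HFiles might be wired up like this:
+
+```java
+Configuration conf = HBaseConfiguration.create();
+Job job = Job.getInstance(conf, "bulk-load-prepare");
+job.setJarByClass(MyBulkLoadMapper.class);            // hypothetical mapper emitting (ImmutableBytesWritable, Put)
+job.setMapperClass(MyBulkLoadMapper.class);
+job.setMapOutputKeyClass(ImmutableBytesWritable.class);
+job.setMapOutputValueClass(Put.class);
+
+TableName tableName = TableName.valueOf("mytable");
+try (Connection conn = ConnectionFactory.createConnection(conf);
+     Table table = conn.getTable(tableName);
+     RegionLocator locator = conn.getRegionLocator(tableName)) {
+  // Sets up the TotalOrderPartitioner, reducer, and output format to match current region boundaries.
+  HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
+  FileInputFormat.addInputPath(job, new Path("/user/todd/input"));
+  FileOutputFormat.setOutputPath(job, new Path("/user/todd/myoutput"));
+  job.waitForCompletion(true);
+}
+```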
+
+### Completing the data load
+
+After a data import has been prepared, either by using the `importtsv` tool with the `importtsv.bulk.output` option or by some other MapReduce job using the `HFileOutputFormat`, the `completebulkload` tool is used to import the data into the running cluster. This command line tool iterates through the prepared data files, and for each one determines the region the file belongs to. It then contacts the appropriate RegionServer which adopts the HFile, moving it into its storage directory and making the data available to clients.
+
+If the region boundaries have changed during the course of bulk load preparation, or between the preparation and completion steps, the `completebulkload` utility will automatically split the data files into pieces corresponding to the new boundaries. This process is not optimally efficient, so users should take care to minimize the delay between preparing a bulk load and importing it into the cluster, especially if other clients are simultaneously loading data through other means.
+
+```bash
+$ hadoop jar hbase-mapreduce-VERSION.jar completebulkload [-c /path/to/hbase/config/hbase-site.xml] /user/todd/myoutput mytable
+```
+
+The `-c config-file` option can be used to specify a file containing the appropriate hbase parameters (e.g., hbase-site.xml) if not already supplied on the CLASSPATH. (In addition, the CLASSPATH must contain the directory that holds the ZooKeeper configuration file if ZooKeeper is NOT managed by HBase.)
+
+## See Also [#bulk-loading-see-also]
+
+For more information about the referenced utilities, see [ImportTsv](/docs/operational-management/tools#importtsv) and [CompleteBulkLoad](/docs/operational-management/tools#completebulkload).
+
+See [How-to: Use HBase Bulk Loading, and Why](http://blog.cloudera.com/blog/2013/09/how-to-use-hbase-bulk-loading-and-why/) for an old blog post on loading.
+
+## Advanced Usage
+
+Although the `importtsv` tool is useful in many cases, advanced users may want to generate data programmatically, or import data from other formats. To get started doing so, dig into `ImportTsv.java` and check the JavaDoc for HFileOutputFormat.
+
+The import step of the bulk load can also be done programmatically. See the `LoadIncrementalHFiles` class for more information.
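+
+A minimal sketch of the programmatic import, assuming the `LoadIncrementalHFiles` API mentioned above (the table name and output path are placeholders):
+
+```java
+Configuration conf = HBaseConfiguration.create();
+TableName tableName = TableName.valueOf("mytable");
+try (Connection conn = ConnectionFactory.createConnection(conf);
+     Admin admin = conn.getAdmin();
+     Table table = conn.getTable(tableName);
+     RegionLocator locator = conn.getRegionLocator(tableName)) {
+  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
+  // Directory produced by HFileOutputFormat2, one subdirectory per column family.
+  loader.doBulkLoad(new Path("/user/todd/myoutput"), admin, table, locator);
+}
+```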
+
+### 'Adopting' Stray Data
+
+Should an HBase cluster lose account of regions or files during an outage or error, you can use the `completebulkload` tool to add back the dropped data. HBase operator tooling such as [HBCK2](https://github.com/apache/hbase-operator-tools/tree/master/hbase-hbck2) or the reporting added to the Master's UI under the `HBCK Report` (Since HBase 2.0.6/2.1.6/2.2.1) can identify such 'orphan' directories.
+
+Before you begin the 'adoption', ensure the `hbase:meta` table is in a healthy state. Run the `CatalogJanitor` by executing the `catalogjanitor_run` command on the HBase shell. When finished, check the `HBCK Report` page on the Master UI. Work on fixing any inconsistencies, holes, or overlaps found before proceeding. The `hbase:meta` table is the authority on where all data is to be found and must be consistent for the `completebulkload` tool to work properly.
+
+The `completebulkload` tool takes a directory and a `tablename`. The directory has subdirectories named for the column families of the targeted `tablename`. In these subdirectories are `hfiles` to load. Given this structure, you can pass errant region directories (and the table name to which the region directory belongs) and the tool will bring the data files back into the fold by moving them under the appropriate serving directory. If the files are stray, you will need to mock up this structure before invoking the `completebulkload` tool; you may have to look at the file content using the [HFile Tool](/docs/operational-management/tools#operational-management-tools-hfile-tool) to see which column family to use. When the tool completes its run, you will notice that the source errant directory has had its storefiles moved/removed. It is now desiccated since its data has been drained, and the pointed-to directory can be safely removed. It may still have `.regioninfo` files and other subdirectories but they are of no relevance now (There may be content still under the _recovered_edits_ directory; a TODO is tooling to replay the content of _recovered_edits_ if needed; see [Add RecoveredEditsPlayer](https://issues.apache.org/jira/browse/HBASE-22976)). If you pass `completebulkload` a directory without store files, it will run and note the directory is storefile-free. Just remove such 'empty' directories.
+
+For example, presuming a directory at the top level in HDFS named `eb3352fb5c9c9a05feeb2caba101e1cc` has data we need to re-add to the HBase `TestTable`:
+
+```bash
+$ ${HBASE_HOME}/bin/hbase --config ~/hbase-conf completebulkload hdfs://server.example.org:9000/eb3352fb5c9c9a05feeb2caba101e1cc TestTable
+```
+
+After it successfully completes, any files that were in `eb3352fb5c9c9a05feeb2caba101e1cc` have been moved under hbase and the `eb3352fb5c9c9a05feeb2caba101e1cc` directory can be deleted (Check content before and after by running `ls -r` on the HDFS directory).
+
+## Bulk Loading Replication
+
+HBASE-13153 adds replication support for bulk loaded HFiles, available since HBase 1.3/2.0. This feature is enabled by setting `hbase.replication.bulkload.enabled` to `true` (default is `false`). You also need to copy the source cluster configuration files to the destination cluster.
+
+Additional configurations are required too:
+
+1. `hbase.replication.source.fs.conf.provider`
+ This defines the class which loads the source cluster file system client configuration in the destination cluster. This should be configured for all the RS in the destination cluster. Default is `org.apache.hadoop.hbase.replication.regionserver.DefaultSourceFSConfigurationProvider`.
+2. `hbase.replication.conf.dir`
+ This represents the base directory where the file system client configurations of the source cluster are copied to the destination cluster. This should be configured for all the RS in the destination cluster. Default is `$HBASE_CONF_DIR`.
+3. `hbase.replication.cluster.id`
+ This configuration is required in the cluster where replication for bulk loaded data is enabled. A source cluster is uniquely identified by the destination cluster using this id. This should be configured in the source cluster configuration file for all the RS.
+
+For example: If source cluster FS client configurations are copied to the destination cluster under directory `/home/user/dc1/`, then `hbase.replication.cluster.id` should be configured as `dc1` and `hbase.replication.conf.dir` as `/home/user`.
+
+
+ `DefaultSourceFSConfigurationProvider` supports only `xml` type files. It loads source cluster FS
+ client configuration only once, so if source cluster FS client configuration files are updated,
+ every peer cluster RS must be restarted to reload the configuration.
+
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/catalog-tables.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/catalog-tables.mdx
new file mode 100644
index 000000000000..bd10a6d35a1f
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/catalog-tables.mdx
@@ -0,0 +1,38 @@
+---
+title: "Catalog Tables"
+description: "Understanding hbase:meta catalog table structure, location tracking, and how HBase maintains region metadata."
+---
+
+The catalog table `hbase:meta` exists as an HBase table and is filtered out of the HBase shell's `list` command, but is in fact a table just like any other.
+
+## hbase:meta
+
+The `hbase:meta` table (previously called `.META.`) keeps a list of all regions in the system, and the location of `hbase:meta` is stored in ZooKeeper.
+
+The `hbase:meta` table structure is as follows:
+
+**Key:**
+
+- Region key of the format (`[table],[region start key],[region id]`)
+
+**Values:**
+
+- `info:regioninfo` (serialized [RegionInfo](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html) instance for this region)
+- `info:server` (server:port of the RegionServer containing this region)
+- `info:serverstartcode` (start-time of the RegionServer process containing this region)
+
+When a table is in the process of splitting, two other columns will be created, called `info:splitA` and `info:splitB`. These columns represent the two daughter regions. The values for these columns are also serialized HRegionInfo instances. After the region has been split, eventually this row will be deleted.
+
+
+ The empty key is used to denote table start and table end. A region with an empty start key is the
+ first region in a table. If a region has both an empty start and an empty end key, it is the only
+ region in the table.
+
+
+In the (hopefully unlikely) event that programmatic processing of catalog metadata is required, see the [RegionInfo.parseFrom]() utility.
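+
+As an illustrative sketch only (assuming the HBase 2.x client API; normal client code should rarely need this), the catalog can be scanned and each `info:regioninfo` cell decoded with `RegionInfo.parseFromOrNull`, a null-tolerant sibling of `parseFrom`:
+
+```java
+try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
+     Table meta = conn.getTable(TableName.META_TABLE_NAME);
+     ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
+  for (Result r : scanner) {
+    byte[] bytes = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"));
+    RegionInfo ri = RegionInfo.parseFromOrNull(bytes);
+    if (ri != null) {
+      // Print the table and full region name for each catalog row.
+      System.out.println(ri.getTable() + " " + ri.getRegionNameAsString());
+    }
+  }
+}
+```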
+
+## Startup Sequencing
+
+First, the location of `hbase:meta` is looked up in ZooKeeper. Next, `hbase:meta` is updated with server and startcode values.
+
+For information on region-RegionServer assignment, see [Region-RegionServer Assignment](/docs/architecture/regions#region-regionserver-assignment).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/client-request-filters.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/client-request-filters.mdx
new file mode 100644
index 000000000000..f54aca06e8d9
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/client-request-filters.mdx
@@ -0,0 +1,251 @@
+---
+title: "Client Request Filters"
+description: "Using filters with Get and Scan operations to efficiently query HBase data, including comparison, column, row, and utility filters."
+---
+
+[Get](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Get.html) and [Scan](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Scan.html) instances can be optionally configured with [filters](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/Filter.html) which are applied on the RegionServer.
+
+Filters can be confusing because there are many different types, and it is best to approach them by understanding the groups of Filter functionality.
+
+## Structural
+
+Structural Filters contain other Filters.
+
+### FilterList
+
+[FilterList](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html) represents a list of Filters with a relationship of `FilterList.Operator.MUST_PASS_ALL` or `FilterList.Operator.MUST_PASS_ONE` between the Filters. The following example shows an 'or' between two Filters (checking for either 'my value' or 'my other value' on the same attribute).
+
+```java
+FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ONE);
+SingleColumnValueFilter filter1 = new SingleColumnValueFilter(
+ cf,
+ column,
+ CompareOperator.EQUAL,
+ Bytes.toBytes("my value")
+ );
+list.add(filter1);
+SingleColumnValueFilter filter2 = new SingleColumnValueFilter(
+ cf,
+ column,
+ CompareOperator.EQUAL,
+ Bytes.toBytes("my other value")
+ );
+list.add(filter2);
+scan.setFilter(list);
+```
+
+## Column Value
+
+### SingleColumnValueFilter
+
+A SingleColumnValueFilter (see: [https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html)) can be used to test column values for equivalence (`CompareOperator.EQUAL`), inequality (`CompareOperator.NOT_EQUAL`), or ranges (e.g., `CompareOperator.GREATER`). The following is an example of testing equivalence of a column to a String value "my value"...
+
+```java
+SingleColumnValueFilter filter = new SingleColumnValueFilter(
+ cf,
+ column,
+  CompareOperator.EQUAL,
+ Bytes.toBytes("my value")
+ );
+scan.setFilter(filter);
+```
+
+### ColumnValueFilter
+
+Introduced in HBase 2.0.0 as a complement to SingleColumnValueFilter, ColumnValueFilter returns only the matched cell, while SingleColumnValueFilter returns the entire row (with its other columns and values) to which the matched cell belongs. The constructor parameters of ColumnValueFilter are the same as those of SingleColumnValueFilter.
+
+```java
+ColumnValueFilter filter = new ColumnValueFilter(
+ cf,
+ column,
+  CompareOperator.EQUAL,
+ Bytes.toBytes("my value")
+ );
+scan.setFilter(filter);
+```
+
+Note: for a simple query such as "equals to a family:qualifier:value", we highly recommend using the following approach instead of SingleColumnValueFilter or ColumnValueFilter:
+
+```java
+Scan scan = new Scan();
+scan.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qualifier"));
+ValueFilter vf = new ValueFilter(CompareOperator.EQUAL,
+ new BinaryComparator(Bytes.toBytes("value")));
+scan.setFilter(vf);
+...
+```
+
+This scan restricts itself to the specified column 'family:qualifier', avoiding scans of unrelated families and columns, which gives better performance; `ValueFilter` is the condition used to do the value filtering.
+
+If your query is more complicated than this, please make the appropriate choice case by case.
+
+## Column Value Comparators
+
+There are several Comparator classes in the Filter package that deserve special mention. These Comparators are used in concert with other Filters, such as [SingleColumnValueFilter](/docs/architecture/client-request-filters#singlecolumnvaluefilter).
+
+### RegexStringComparator
+
+[RegexStringComparator](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html) supports regular expressions for value comparisons.
+
+```java
+RegexStringComparator comp = new RegexStringComparator("my."); // any value that starts with 'my'
+SingleColumnValueFilter filter = new SingleColumnValueFilter(
+ cf,
+ column,
+  CompareOperator.EQUAL,
+ comp
+ );
+scan.setFilter(filter);
+```
+
+See the Oracle JavaDoc for [supported RegEx patterns in Java](http://download.oracle.com/javase/6/docs/api/java/util/regex/Pattern.html).
+
+### SubstringComparator
+
+[SubstringComparator](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/SubstringComparator.html) can be used to determine if a given substring exists in a value. The comparison is case-insensitive.
+
+```java
+SubstringComparator comp = new SubstringComparator("y val"); // looking for 'my value'
+SingleColumnValueFilter filter = new SingleColumnValueFilter(
+ cf,
+ column,
+  CompareOperator.EQUAL,
+ comp
+ );
+scan.setFilter(filter);
+```
+
+### BinaryPrefixComparator
+
+See [BinaryPrefixComparator](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/BinaryPrefixComparator.html).
+
+### BinaryComparator
+
+See [BinaryComparator](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/BinaryComparator.html).
+
+### BinaryComponentComparator
+
+[BinaryComponentComparator](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/BinaryComponentComparator.html) can be used to compare a specific value at a specific location within the cell value. The comparison can be done for both ASCII and binary data.
+
+```java
+byte[] partialValue = Bytes.toBytes("partial_value");
+int partialValueOffset = 0;
+Filter partialValueFilter = new ValueFilter(CompareFilter.CompareOp.GREATER,
+ new BinaryComponentComparator(partialValue,partialValueOffset));
+```
+
+See [HBASE-22969](https://issues.apache.org/jira/browse/HBASE-22969) for other use cases and details.
+
+## KeyValue Metadata
+
+As HBase stores data internally as KeyValue pairs, KeyValue Metadata Filters evaluate the existence of keys (i.e., ColumnFamily:Column qualifiers) for a row, as opposed to the values covered in the previous section.
+
+### FamilyFilter
+
+[FamilyFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/FamilyFilter.html) can be used to filter on the ColumnFamily. It is generally a better idea to select ColumnFamilies in the Scan than to do it with a Filter.
+
+### QualifierFilter
+
+[QualifierFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/QualifierFilter.html) can be used to filter based on Column (aka Qualifier) name.
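+
+As a brief illustrative sketch (not one of the original examples), a QualifierFilter that keeps only cells whose qualifier equals a hypothetical `q1` might look like:
+
+```java
+Scan scan = new Scan();
+scan.setFilter(new QualifierFilter(CompareOperator.EQUAL,
+    new BinaryComparator(Bytes.toBytes("q1"))));
+```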
+
+### ColumnPrefixFilter
+
+[ColumnPrefixFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html) can be used to filter based on the lead portion of Column (aka Qualifier) names.
+
+A ColumnPrefixFilter seeks ahead to the first column matching the prefix in each row and for each involved column family. It can be used to efficiently get a subset of the columns in very wide rows.
+
+Note: The same column qualifier can be used in different column families. This filter returns all matching columns.
+
+Example: Find all columns in a row and family that start with "abc"
+
+```java
+Table t = ...;
+byte[] row = ...;
+byte[] family = ...;
+byte[] prefix = Bytes.toBytes("abc");
+Scan scan = new Scan(row, row); // (optional) limit to one row
+scan.addFamily(family); // (optional) limit to one family
+Filter f = new ColumnPrefixFilter(prefix);
+scan.setFilter(f);
+scan.setBatch(10); // set this if there could be many columns returned
+ResultScanner rs = t.getScanner(scan);
+for (Result r = rs.next(); r != null; r = rs.next()) {
+  for (Cell cell : r.listCells()) {
+ // each cell represents a column
+ }
+}
+rs.close();
+```
+
+### MultipleColumnPrefixFilter
+
+[MultipleColumnPrefixFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html) behaves like ColumnPrefixFilter but allows specifying multiple prefixes.
+
+Like ColumnPrefixFilter, MultipleColumnPrefixFilter efficiently seeks ahead to the first column matching the lowest prefix and also seeks past ranges of columns between prefixes. It can be used to efficiently get discontinuous sets of columns from very wide rows.
+
+Example: Find all columns in a row and family that start with "abc" or "xyz"
+
+```java
+Table t = ...;
+byte[] row = ...;
+byte[] family = ...;
+byte[][] prefixes = new byte[][] {Bytes.toBytes("abc"), Bytes.toBytes("xyz")};
+Scan scan = new Scan(row, row); // (optional) limit to one row
+scan.addFamily(family); // (optional) limit to one family
+Filter f = new MultipleColumnPrefixFilter(prefixes);
+scan.setFilter(f);
+scan.setBatch(10); // set this if there could be many columns returned
+ResultScanner rs = t.getScanner(scan);
+for (Result r = rs.next(); r != null; r = rs.next()) {
+  for (Cell cell : r.listCells()) {
+ // each cell represents a column
+ }
+}
+rs.close();
+```
+
+### ColumnRangeFilter
+
+A [ColumnRangeFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/ColumnRangeFilter.html) allows efficient intra row scanning.
+
+A ColumnRangeFilter can seek ahead to the first matching column for each involved column family. It can be used to efficiently get a 'slice' of the columns of a very wide row, e.g., you have a million columns in a row but only want to look at columns bbbb-bbdd.
+
+Note: The same column qualifier can be used in different column families. This filter returns all matching columns.
+
+Example: Find all columns in a row and family between "bbbb" (inclusive) and "bbdd" (inclusive)
+
+```java
+Table t = ...;
+byte[] row = ...;
+byte[] family = ...;
+byte[] startColumn = Bytes.toBytes("bbbb");
+byte[] endColumn = Bytes.toBytes("bbdd");
+Scan scan = new Scan(row, row); // (optional) limit to one row
+scan.addFamily(family); // (optional) limit to one family
+Filter f = new ColumnRangeFilter(startColumn, true, endColumn, true);
+scan.setFilter(f);
+scan.setBatch(10); // set this if there could be many columns returned
+ResultScanner rs = t.getScanner(scan);
+for (Result r = rs.next(); r != null; r = rs.next()) {
+  for (Cell cell : r.listCells()) {
+ // each cell represents a column
+ }
+}
+rs.close();
+```
+
+Note: Introduced in HBase 0.92
+
+## RowKey
+
+### RowFilter
+
+It is generally a better idea to use the startRow/stopRow methods on Scan for row selection, however [RowFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/RowFilter.html) can also be used.
+
+You can supplement a scan (both bounded and unbounded) with RowFilter constructed from [BinaryComponentComparator](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/BinaryComponentComparator.html) for further filtering out or filtering in rows. See [HBASE-22969](https://issues.apache.org/jira/browse/HBASE-22969) for use cases and other details.
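+
+A short sketch, assuming the HBase 2.x `CompareOperator` API, of a RowFilter that matches row keys starting with a hypothetical `user-` prefix:
+
+```java
+Scan scan = new Scan();
+scan.setFilter(new RowFilter(CompareOperator.EQUAL,
+    new RegexStringComparator("^user-.*")));
+```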
+
+## Utility
+
+### FirstKeyOnlyFilter
+
+This is primarily used for rowcount jobs. See [FirstKeyOnlyFilter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/FirstKeyOnlyFilter.html).
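+
+A minimal sketch of the row-counting pattern (assuming an existing `Table` instance named `table`):
+
+```java
+Scan scan = new Scan();
+scan.setFilter(new FirstKeyOnlyFilter());   // only the first cell of each row is returned
+long rowCount = 0;
+try (ResultScanner rs = table.getScanner(scan)) {
+  for (Result r : rs) {
+    rowCount++;
+  }
+}
+```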
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/client.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/client.mdx
new file mode 100644
index 000000000000..29ada3066d1d
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/client.mdx
@@ -0,0 +1,270 @@
+---
+title: "Client"
+description: "HBase client architecture, connection management, metadata caching, and client-side configuration for optimal performance."
+---
+
+The HBase client finds the RegionServers that are serving the particular row range of interest. It does this by querying the `hbase:meta` table. See [hbase:meta](/docs/architecture/catalog-tables#hbasemeta) for details. After locating the required region(s), the client contacts the RegionServer serving that region, rather than going through the master, and issues the read or write request. This information is cached in the client so that subsequent requests need not go through the lookup process. Should a region be reassigned either by the master load balancer or because a RegionServer has died, the client will requery the catalog tables to determine the new location of the user region.
+
+See [Runtime Impact](/docs/architecture/master#runtime-impact) for more information about the impact of the Master on HBase Client communication.
+
+Administrative functions are done via an instance of [Admin](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Admin.html)
+
+## Cluster Connections
+
+The API changed in HBase 1.0. For connection configuration information, see [Client configuration and dependencies connecting to an HBase cluster](/docs/configuration/default#client-configuration-and-dependencies-connecting-to-an-hbase-cluster).
+
+### API as of HBase 1.0.0
+
+It's been cleaned up and users are returned Interfaces to work against rather than particular types. In HBase 1.0, obtain a `Connection` object from `ConnectionFactory` and thereafter, get from it instances of `Table`, `Admin`, and `RegionLocator` on an as-need basis. When done, close the obtained instances. Finally, be sure to cleanup your `Connection` instance before exiting. `Connections` are heavyweight objects but thread-safe so you can create one for your application and keep the instance around. `Table`, `Admin` and `RegionLocator` instances are lightweight. Create as you go and then let go as soon as you are done by closing them. See the [Client Package Javadoc Description](https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/package-summary.html) for example usage of the new HBase 1.0 API.
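+
+A condensed, illustrative sketch of that pattern (the table name below is a placeholder):
+
+```java
+Configuration conf = HBaseConfiguration.create();
+TableName tableName = TableName.valueOf("myTable");
+try (Connection connection = ConnectionFactory.createConnection(conf);
+     Table table = connection.getTable(tableName);
+     Admin admin = connection.getAdmin();
+     RegionLocator locator = connection.getRegionLocator(tableName)) {
+  // Use table, admin, and locator as needed; try-with-resources closes them,
+  // and the heavyweight Connection is closed last.
+}
+```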
+
+### API before HBase 1.0.0
+
+Instances of `HTable` are the way to interact with an HBase cluster earlier than 1.0.0. _[Table](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html) instances are not thread-safe_. Only one thread can use an instance of Table at any given time. When creating Table instances, it is advisable to use the same [HBaseConfiguration](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/HBaseConfiguration) instance. This will ensure sharing of ZooKeeper and socket instances to the RegionServers which is usually what you want. For example, this is preferred:
+
+```java
+HBaseConfiguration conf = HBaseConfiguration.create();
+HTable table1 = new HTable(conf, "myTable");
+HTable table2 = new HTable(conf, "myTable");
+```
+
+as opposed to this:
+
+```java
+HBaseConfiguration conf1 = HBaseConfiguration.create();
+HTable table1 = new HTable(conf1, "myTable");
+HBaseConfiguration conf2 = HBaseConfiguration.create();
+HTable table2 = new HTable(conf2, "myTable");
+```
+
+For more information about how connections are handled in the HBase client, see [ConnectionFactory](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html).
+
+#### Connection Pooling
+
+For applications which require high-end multithreaded access (e.g., web-servers or application servers that may serve many application threads in a single JVM), you can pre-create a `Connection`, as shown in the following example:
+
+**Example 24. Pre-Creating a `Connection`**
+
+```java
+// Create a connection to the cluster.
+Configuration conf = HBaseConfiguration.create();
+try (Connection connection = ConnectionFactory.createConnection(conf);
+ Table table = connection.getTable(TableName.valueOf(tablename))) {
+ // use table as needed, the table returned is lightweight
+}
+```
+
+
+ Previous versions of this guide discussed `HTablePool`, which was deprecated in HBase 0.94, 0.95,
+ and 0.96, and removed in 0.98.1, by
+ [HBASE-6580](https://issues.apache.org/jira/browse/HBASE-6580), or `HConnection`, which is
+ deprecated in HBase 1.0 by `Connection`. Please use
+ [Connection](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Connection.html)
+ instead.
+
+
+## WriteBuffer and Batch Methods
+
+In HBase 1.0 and later, [HTable](https://hbase.apache.org/1.4/devapidocs/org/apache/hadoop/hbase/client/HTable.html) is deprecated in favor of [Table](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html). `Table` does not use autoflush. To do buffered writes, use the BufferedMutator class.
+
+In HBase 2.0 and later, [HTable](https://hbase.apache.org/2.6/devapidocs/org/apache/hadoop/hbase/client/HTable.html) does not use BufferedMutator to execute the `Put` operation. Refer to [HBASE-18500](https://issues.apache.org/jira/browse/HBASE-18500) for more information.
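+
+A minimal sketch of buffered writes with `BufferedMutator`, as recommended above (the table, family, and row contents are placeholders):
+
+```java
+Configuration conf = HBaseConfiguration.create();
+try (Connection connection = ConnectionFactory.createConnection(conf);
+     BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf("myTable"))) {
+  Put put = new Put(Bytes.toBytes("row1"));
+  put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
+  mutator.mutate(put);   // buffered on the client side
+  mutator.flush();       // push the buffered mutations to the cluster
+}
+```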
+
+For additional information on write durability, review the [ACID semantics](/acid-semantics) page.
+
+For fine-grained control of batching of `Put`s or `Delete`s, see the [batch]() methods on Table.
+
+## Asynchronous Client
+
+It is a new API introduced in HBase 2.0 which aims to provide the ability to access HBase asynchronously.
+
+You can obtain an `AsyncConnection` from `ConnectionFactory`, and then get an asynchronous table instance from it to access HBase. When done, close the `AsyncConnection` instance (usually when your program exits).
+
+For the asynchronous table, most methods have the same meaning as in the old `Table` interface, except that the return value is usually wrapped in a CompletableFuture. There is no buffer here, so there is no close method for the asynchronous table and you do not need to close it. It is also thread safe.
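+
+A small sketch of the asynchronous API (the table name and row key are placeholders; the blocking `join()` calls are only for brevity):
+
+```java
+try (AsyncConnection asyncConn =
+         ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).join()) {
+  AsyncTable<AdvancedScanResultConsumer> table = asyncConn.getTable(TableName.valueOf("myTable"));
+  Result result = table.get(new Get(Bytes.toBytes("row1"))).join();
+  System.out.println(Bytes.toString(result.getRow()));
+}
+```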
+
+There are several differences for scan:
+
+- There is still a `getScanner` method which returns a `ResultScanner`. You can use it in the old way and it works like the old `ClientAsyncPrefetchScanner`.
+- There is a `scanAll` method which will return all the results at once. It aims to provide a simpler way for small scans which you want to get the whole results at once usually.
+- The Observer Pattern. There is a scan method which accepts a `ScanResultConsumer` as a parameter. It will pass the results to the consumer.
+
+Notice that the `AsyncTable` interface is templatized. The template parameter specifies the type of `ScanResultConsumerBase` used by scans, which means the observer-style scan APIs are different. The two types of scan consumers are `ScanResultConsumer` and `AdvancedScanResultConsumer`.
+
+`ScanResultConsumer` needs a separate thread pool which is used to execute the callbacks registered to the returned CompletableFuture. Because the use of separate thread pool frees up RPC threads, callbacks are free to do anything. Use this if the callbacks are not quick, or when in doubt.
+
+`AdvancedScanResultConsumer` executes callbacks inside the framework thread. It is not allowed to do time consuming work in the callbacks else it will likely block the framework threads and cause very bad performance impact. As its name, it is designed for advanced users who want to write high performance code. See `org.apache.hadoop.hbase.client.example.HttpProxyExample` for how to write fully asynchronous code with it.
+
+## Asynchronous Admin
+
+You can obtain an `AsyncConnection` from `ConnectionFactory`, and then get an `AsyncAdmin` instance from it to access HBase. Notice that there are two `getAdmin` methods for getting an `AsyncAdmin` instance. One method has an extra thread pool parameter which is used to execute callbacks. It is designed for normal users. The other method does not need a thread pool and all the callbacks are executed inside the framework thread, so it is not allowed to do time-consuming work in the callbacks. It is designed for advanced users.
+
+The default `getAdmin` methods will return an `AsyncAdmin` instance which uses default configs. If you want to customize some configs, you can use the `getAdminBuilder` methods to get an `AsyncAdminBuilder` for creating an `AsyncAdmin` instance. Users are free to set only the configs they care about when creating a new `AsyncAdmin` instance.
+
+For the `AsyncAdmin` interface, most methods have the same meaning as in the old `Admin` interface, except that the return value is usually wrapped in a CompletableFuture.
+
+For most admin operations, when the returned CompletableFuture is done, it means the admin operation has also been done. But for compact operation, it only means the compact request was sent to HBase and may need some time to finish the compact operation. For `rollWALWriter` method, it only means the rollWALWriter request was sent to the region server and may need some time to finish the `rollWALWriter` operation.
+
+For region name, we only accept `byte[]` as the parameter type and it may be a full region name or an encoded region name. For server name, we only accept `ServerName` as the parameter type. For table name, we only accept `TableName` as the parameter type. For `list*` operations, we only accept `Pattern` as the parameter type if you want to do regex matching.
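+
+A short sketch using the thread-pool-free `getAdmin` variant described above (so the callback is kept deliberately lightweight):
+
+```java
+try (AsyncConnection asyncConn =
+         ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).join()) {
+  AsyncAdmin admin = asyncConn.getAdmin();
+  admin.listTableNames()
+       .thenAccept(tables -> tables.forEach(t -> System.out.println(t.getNameAsString())))
+       .join();   // block only so the sketch completes before the connection closes
+}
+```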
+
+## External Clients
+
+Information on non-Java clients and custom protocols is covered in [Apache HBase External APIs](/docs/external-apis)
+
+## Master Registry (new as of 2.3.0)
+
+Starting from 2.5.0, MasterRegistry is deprecated. Its functionality is completely superseded by the RpcConnectionRegistry. Please see [Rpc Connection Registry (new as of 2.5.0)](/docs/architecture/client#rpc-connection-registry-new-as-of-250) for more details.
+
+Client internally works with a _connection registry_ to fetch the metadata needed by connections. This connection registry implementation is responsible for fetching the following metadata.
+
+- Active master address
+- Current meta region(s) locations
+- Cluster ID (unique to this cluster)
+
+This information is needed as a part of various client operations like connection set up, scans, gets, etc. Traditionally, the connection registry implementation has been based on ZooKeeper as the source of truth and clients fetched the metadata directly from the ZooKeeper quorum. HBase 2.3.0 introduces a new connection registry implementation based on direct communication with the Masters. With this implementation, clients now fetch required metadata via master RPC end points instead of maintaining connections to ZooKeeper. This change was done for the following reasons.
+
+- Reduce load on ZooKeeper since that is critical for cluster operation.
+- Holistic client timeout and retry configurations since the new registry brings all the client operations under HBase rpc framework.
+- Remove the ZooKeeper client dependency on HBase client library.
+
+This means:
+
+- At least a single active or stand-by master is needed for cluster connection setup. Refer to [Runtime Impact](/docs/architecture/master#runtime-impact) for more details.
+- Master can be in a critical path of read/write operations, especially if the client metadata cache is empty or stale.
+- There is a higher connection load on the masters than before since the clients talk directly to the HMasters instead of the ZooKeeper ensemble.
+
+To reduce hot-spotting on a single master, all the masters (active & stand-by) expose the needed service to fetch the connection metadata. This lets the client connect to any master (not just active). Both ZooKeeper-based and Master-based connection registry implementations are available in 2.3+. For 2.x and earlier, the ZooKeeper-based implementation remains the default configuration. For 3.0.0, RpcConnectionRegistry becomes the default configuration, as the alternate to MasterRegistry.
+
+Change the connection registry implementation by updating the value configured for `hbase.client.registry.impl`. To explicitly enable the ZooKeeper-based registry, use
+
+```xml
+<property>
+  <name>hbase.client.registry.impl</name>
+  <value>org.apache.hadoop.hbase.client.ZKConnectionRegistry</value>
+</property>
+```
+
+To explicitly enable the Master-based registry, use
+
+```xml
+<property>
+  <name>hbase.client.registry.impl</name>
+  <value>org.apache.hadoop.hbase.client.MasterRegistry</value>
+</property>
+```
+
+### MasterRegistry RPC hedging
+
+MasterRegistry implements hedging of connection registry RPCs across active and stand-by masters. This lets the client make the same request to multiple servers; whichever responds first is returned to the client immediately. This improves performance, especially when a subset of servers are under load. The hedging fan-out size, meaning the number of requests that are hedged in a single attempt, is configurable using the configuration key _hbase.client.master_registry.hedged.fanout_ in the client configuration. It defaults to 2. With this default, the RPCs are tried in batches of 2. The hedging policy is still primitive and does not adapt to any sort of live rpc performance metrics.
+
+### Additional Notes
+
+- Clients hedge the requests in a randomized order to avoid hot-spotting a single master.
+- Cluster internal connections (masters ↔ regionservers) still use ZooKeeper based connection registry.
+- Cluster internal state is still tracked in ZooKeeper, hence ZK availability requirements are the same as before.
+- Inter cluster replication still uses ZooKeeper based connection registry to simplify configuration management.
+
+For more implementation details, please refer to the [design doc](https://github.com/apache/hbase/tree/master/dev-support/design-docs) and [HBASE-18095](https://issues.apache.org/jira/browse/HBASE-18095).
+
+## Rpc Connection Registry (new as of 2.5.0)
+
+As said in the [Master Registry (new as of 2.3.0)](/docs/architecture/client#master-registry-new-as-of-230) section, there are some disadvantages and limitations to MasterRegistry, especially that it puts the master in the critical path of read/write operations. In order to address these problems, we introduced a more generic RpcConnectionRegistry.
+
+It is also rpc based, like MasterRegistry, with several differences:
+
+1. Region servers also implement the necessary rpc service, so you can configure any nodes in the cluster as bootstrap nodes, not only masters.
+2. It supports refreshing the bootstrap nodes, both to spread load across the nodes in the cluster and to remove dead nodes from the bootstrap list.
+
+To explicitly enable the rpc-based registry, use
+
+```xml
+<property>
+  <name>hbase.client.registry.impl</name>
+  <value>org.apache.hadoop.hbase.client.RpcConnectionRegistry</value>
+</property>
+```
+
+To configure the bootstrap nodes, use
+
+```xml
+<property>
+  <name>hbase.client.bootstrap.servers</name>
+  <value>server1:16020,server2:16020,server3:16020</value>
+</property>
+```
+
+If not configured, we will fall back to using the master addresses as the bootstrap nodes.
+
+RpcConnectionRegistry is available in 2.5+, and becomes the default client registry implementation in 3.0.0.
+
+### RpcConnectionRegistry RPC hedging
+
+Hedged read is still supported, the configuration key is now _hbase.client.bootstrap.hedged.fanout_, and its default value is still 2.
+
+### RpcConnectionRegistry bootstrap nodes refreshing
+
+There are basically two reasons for us to refresh the bootstrap nodes:
+
+- Periodically. This is for spreading loads across the nodes in the cluster. There are two configurations
+ 1. _hbase.client.bootstrap.refresh_interval_secs_: the refresh interval in seconds, default 300. A value less than or equal to zero means disable refreshing.
+  2. _hbase.client.bootstrap.initial_refresh_delay_secs_: the initial refresh interval in seconds; the default value is 1/10 of _hbase.client.bootstrap.refresh_interval_secs_. The reason for introducing a separate configuration for the first refresh is that, since end users can configure any nodes in a cluster as the initial bootstrap nodes, it is possible that different end users will configure the same machine and overload it. So we use a shorter delay for the initial refresh, to let clients quickly switch to the bootstrap nodes we want them to connect to.
+
+- When there is a connection error while requesting the nodes, we will refresh immediately, to remove the dead nodes. To avoid putting too much pressure on the cluster, there is a configuration _hbase.client.bootstrap.min_secs_between_refreshes_ to control the minimum interval between two refreshes. The default value is 60, but notice that, if you change _hbase.client.bootstrap.refresh_interval_secs_ to a small value, you need to make sure to also change _hbase.client.bootstrap.min_secs_between_refreshes_ to a value smaller than _hbase.client.bootstrap.refresh_interval_secs_, otherwise an IllegalArgumentException will be thrown.
+
+
+ (Advanced) In case of any issues with the rpc/master based registry, use the following
+ configuration to fallback to the ZooKeeper based connection registry implementation.
+
+
+```xml
+<property>
+  <name>hbase.client.registry.impl</name>
+  <value>org.apache.hadoop.hbase.client.ZKConnectionRegistry</value>
+</property>
+```
+
+## Connection URI
+
+Starting from 2.7.0, we add support for specifying the connection information for an HBase cluster through a URI, which we call a "connection URI". We've also added several methods in _ConnectionFactory_ to let you get a connection to the cluster specified by the URI. It looks like:
+
+```java
+URI uri = new URI("hbase+rpc://server1:16020,server2:16020,server3:16020");
+try (Connection conn = ConnectionFactory.createConnection(uri)) {
+ ...
+}
+```
+
+### Supported Schemes
+
+Currently there are two schemes supported, _hbase+rpc_ for _RpcConnectionRegistry_ and _hbase+zk_ for _ZKConnectionRegistry_. _MasterRegistry_ is deprecated so we do not expose it through connection URI.
+
+For _hbase+rpc_, it looks like
+
+```shell
+hbase+rpc://server1:16020,server2:16020,server3:16020
+```
+
+The authority part _server1:16020,server2:16020,server3:16020_ specifies the bootstrap nodes and their rpc ports, i.e, the configuration value for _hbase.client.bootstrap.servers_ in the past.
+
+For _hbase+zk_, it looks like
+
+```shell
+hbase+zk://zk1:2181,zk2:2181,zk3:2181/hbase
+```
+
+The authority part _zk1:2181,zk2:2181,zk3:2181_ is the zk quorum, i.e, the configuration value for _hbase.zookeeper.quorum_ in the past. The path part _/hbase_ is the znode parent, i.e, the configuration value for _zookeeper.znode.parent_ in the past.
+
+### Specify Configuration through URI Queries
+
+To let users fully specify the connection information through a connection URI, we support specifying configuration values through URI Queries. It looks like:
+
+```shell
+hbase+rpc://server1:16020?hbase.client.operation.timeout=10000
+```
+
+In this way you can set the operation timeout to 10 seconds. Notice that the configuration values specified in the connection URI will override the ones in the configuration file.
+
+### Implement Your Own Connection Registry
+
+We use _ServiceLoader_ to load different connection registry implementations; the entry point is _org.apache.hadoop.hbase.client.ConnectionRegistryURIFactory_. So if you implement your own _ConnectionRegistryURIFactory_ with a different scheme and register it in the services file, we can load it at runtime.
+
+Connection URI is still a very new feature which has not been used extensively in production, so we do not want to expose the ability to customize _ConnectionRegistryURIFactory_ yet as the API may be changed frequently in the beginning.
+
+If you really want to implement your own connection registry, you can use the approach above, but at your own risk.
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/hbase-mob.mdx
similarity index 71%
rename from src/main/asciidoc/_chapters/hbase_mob.adoc
rename to hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/hbase-mob.mdx
index 0e09db11a18d..5fc1a287fc9a 100644
--- a/src/main/asciidoc/_chapters/hbase_mob.adoc
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/hbase-mob.mdx
@@ -1,33 +1,7 @@
-////
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-////
-
-[[hbase_mob]]
-== Storing Medium-sized Objects (MOB)
-:doctype: book
-:numbered:
-:toc: left
-:icons: font
-:experimental:
-:toc: left
-:source-language: java
+---
+title: "Storing Medium-sized Objects (MOB)"
+description: "Optimized storage and handling of medium-sized objects (100KB-10MB) in HBase using the MOB feature for improved performance."
+---
Data comes in many sizes, and saving all of your data in HBase, including binary
data such as images and documents, is ideal. While HBase can technically handle
@@ -36,16 +10,16 @@ read and write paths are optimized for values smaller than 100KB in size. When
HBase deals with large numbers of objects over this threshold, referred to here
as medium objects, or MOBs, performance is degraded due to write amplification
caused by splits and compactions. When using MOBs, ideally your objects will be between
-100KB and 10MB (see the <>). HBase 2 added special internal handling of MOBs
+100KB and 10MB (see the [faq](/docs/faq)). HBase 2 added special internal handling of MOBs
to maintain performance, consistency, and low operational overhead. MOB support is
-provided by the work done in link:https://issues.apache.org/jira/browse/HBASE-11339[HBASE-11339].
-To take advantage of MOB, you need to use <>. Optionally,
+provided by the work done in [HBASE-11339](https://issues.apache.org/jira/browse/HBASE-11339).
+To take advantage of MOB, you need to use [HFile version 3](/docs/hfile-format#hbase-file-format-with-security-enhancements-version-3). Optionally,
configure the MOB file reader's cache settings for each RegionServer (see
-<>), then configure specific columns to hold MOB data.
+[Configure the MOB Cache](/docs/architecture/hbase-mob#configuring-the-mob-cache)), then configure specific columns to hold MOB data.
Client code does not need to change to take advantage of HBase MOB support. The
feature is transparent to the client.
-=== Configuring Columns for MOB
+## Configuring Columns for MOB
You can configure columns to support MOB during table creation or alteration,
either in HBase Shell or via the Java API. The two relevant properties are the
@@ -53,58 +27,56 @@ boolean `IS_MOB` and the `MOB_THRESHOLD`, which is the number of bytes at which
an object is considered to be a MOB. Only `IS_MOB` is required. If you do not
specify the `MOB_THRESHOLD`, the default threshold value of 100 KB is used.
-.Configure a Column for MOB Using HBase Shell
-----
+### Configure a Column for MOB Using HBase Shell
+
+```bash
hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400}
hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400}
-----
+```
+
+### Configure a Column for MOB Using the Java API
-.Configure a Column for MOB Using the Java API
-====
-[source,java]
-----
+```java
...
-HColumnDescriptor hcd = new HColumnDescriptor(“f”);
+HColumnDescriptor hcd = new HColumnDescriptor("f");
hcd.setMobEnabled(true);
...
hcd.setMobThreshold(102400L);
...
-----
-====
+```
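+
+On HBase 2.x, the same column configuration can be sketched with the builder-style API (assuming an existing `Admin` instance named `admin`):
+
+```java
+ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
+    .newBuilder(Bytes.toBytes("f"))
+    .setMobEnabled(true)
+    .setMobThreshold(102400L)
+    .build();
+TableDescriptor desc = TableDescriptorBuilder
+    .newBuilder(TableName.valueOf("t1"))
+    .setColumnFamily(mobFamily)
+    .build();
+admin.createTable(desc);
+```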
-=== Testing MOB
+## Testing MOB
The utility `org.apache.hadoop.hbase.IntegrationTestIngestWithMOB` is provided to assist with testing
the MOB feature. The utility is run as follows:
-[source,bash]
-----
+
+```bash
$ sudo -u hbase hbase org.apache.hadoop.hbase.IntegrationTestIngestWithMOB \
- -threshold 1024 \
- -minMobDataSize 512 \
- -maxMobDataSize 5120
-----
+ -threshold 1024 \
+ -minMobDataSize 512 \
+ -maxMobDataSize 5120
+```
-* `*threshold*` is the threshold at which cells are considered to be MOBs.
- The default is 1 kB, expressed in bytes.
-* `*minMobDataSize*` is the minimum value for the size of MOB data.
- The default is 512 B, expressed in bytes.
-* `*maxMobDataSize*` is the maximum value for the size of MOB data.
- The default is 5 kB, expressed in bytes.
+- **`threshold`** is the threshold at which cells are considered to be MOBs.
+ The default is 1 kB, expressed in bytes.
+- **`minMobDataSize`** is the minimum value for the size of MOB data.
+ The default is 512 B, expressed in bytes.
+- **`maxMobDataSize`** is the maximum value for the size of MOB data.
+ The default is 5 kB, expressed in bytes.
-=== MOB architecture
+## MOB architecture
This section is derived from information found in
-link:https://issues.apache.org/jira/browse/HBASE-11339[HBASE-11339], which covered the initial GA
+[HBASE-11339](https://issues.apache.org/jira/browse/HBASE-11339), which covered the initial GA
implementation of MOB in HBase and
-link:https://issues.apache.org/jira/browse/HBASE-22749[HBASE-22749], which improved things by
+[HBASE-22749](https://issues.apache.org/jira/browse/HBASE-22749), which improved things by
parallelizing MOB maintenance across the RegionServers. For more information see
the last version of the design doc created during the initial work,
-"link:https://github.com/apache/hbase/blob/master/dev-support/design-docs/HBASE-11339%20MOB%20GA%20design.pdf[HBASE-11339 MOB GA design.pdf]",
+"[HBASE-11339 MOB GA design.pdf](https://github.com/apache/hbase/blob/master/dev-support/design-docs/HBASE-11339%20MOB%20GA%20design.pdf)",
and the design doc for the distributed mob compaction feature,
-"link:https://github.com/apache/hbase/blob/master/dev-support/design-docs/HBASE-22749%20MOB%20distributed%20compaction.pdf[HBASE-22749 MOB distributed compaction.pdf]".
-
+"[HBASE-22749 MOB distributed compaction.pdf](https://github.com/apache/hbase/blob/master/dev-support/design-docs/HBASE-22749%20MOB%20distributed%20compaction.pdf)".
-==== Overview
+### Overview [#hbase-mob-overview]
The MOB feature reduces the overall IO load for configured column families by storing values that
are larger than the configured threshold outside of the normal regions to avoid splits, merges, and
@@ -132,16 +104,16 @@ requests. When these MOB hfiles are created from a flush or MOB compaction they
dedicated mob data area under the hbase root directory specific to the namespace, table, mob
logical region, and column family. In general that means a path structured like:
-----
+```
%HBase Root Dir%/mobdir/data/%namespace%/%table%/%logical region%/%column family%/
-----
+```
With default configs, for an example table named 'some_table' in the
default namespace with a MOB-enabled column family named 'foo', this HDFS directory would be
-----
+```
/hbase/mobdir/data/default/some_table/372c1b27e3dc0b56c3a031926e5efbe9/foo/
-----
+```
These MOB hfiles are maintained by special chores in the HBase Master and across the individual
Region Servers. Specifically those chores take care of enforcing TTLs and compacting them. Note that
@@ -153,16 +125,16 @@ the Master will take care of moving it to the archive just
like any normal hfile. Because the table's mob region is independent of all the normal regions it
can coexist with them in the regular archive storage area:
-----
+```
/hbase/archive/data/default/some_table/372c1b27e3dc0b56c3a031926e5efbe9/foo/
-----
+```
The same hfile cleaning chores that take care of eventually deleting unneeded archived files from
normal regions thus also will take care of these MOB hfiles. As such, if there is a snapshot of a
MOB enabled table then the cleaning system will make sure those MOB files stick around in the
archive area as long as they are needed by a snapshot or a clone of a snapshot.
-==== MOB compaction
+### MOB compaction
Each time the memstore for a MOB enabled column family performs a flush HBase will write values over
the MOB threshold into MOB specific hfiles. When normal region compaction occurs the Region Server
@@ -189,16 +161,13 @@ the advantage of looking across all active cells for the region our several smal
end up as a single MOB file per region. The chore defaults to running weekly and can be
configured by setting `hbase.mob.compaction.chore.period` to the desired period in seconds.
-====
-[source,xml]
-----
+```xml
- hbase.mob.compaction.chore.period
- 2592000
- Example of changing the chore period from a week to a month.
+<property>
+  <name>hbase.mob.compaction.chore.period</name>
+  <value>2592000</value>
+  <description>Example of changing the chore period from a week to a month.</description>
+</property>
-----
-====
+```
By default, the periodic MOB compaction coordination chore will attempt to keep every region
busy doing compactions in parallel in order to maximize the amount of work done on the cluster.
@@ -208,18 +177,15 @@ can control how many concurrent region-level compaction requests are allowed by
the configuration to 0 then you will get the default behavior of attempting to do all regions in
parallel.
-====
-[source,xml]
-----
+```xml
- hbase.mob.major.compaction.region.batch.size
- 1
- Example of switching from "as parallel as possible" to "serially"
+<property>
+  <name>hbase.mob.major.compaction.region.batch.size</name>
+  <value>1</value>
+  <description>Example of switching from "as parallel as possible" to "serially"</description>
+</property>
-----
-====
+```
-==== MOB file archiving
+### MOB file archiving
Eventually we will have MOB hfiles that are no longer needed. Either clients will overwrite the
value or a MOB-rewriting compaction will store a reference to a newer larger MOB hfile. Because any
@@ -229,8 +195,8 @@ to archive MOB hfiles. Instead a periodic chore in the Master evaluates MOB hfil
A MOB HFile will be subject to archiving under any of the following conditions:
-* Any MOB HFile older than the column family's TTL
-* Any MOB HFile older than a "too recent" threshold with no references to it from the regular hfiles
+- Any MOB HFile older than the column family's TTL
+- Any MOB HFile older than a "too recent" threshold with no references to it from the regular hfiles
for all regions in a column family
To determine if a MOB HFile meets the second criteria the chore extracts metadata from the regular
@@ -242,9 +208,9 @@ positive integer number of seconds. It defaults to running daily. You should not
unless you have a very aggressive TTL or a very high rate of MOB updates with a correspondingly
high rate of non-MOB compactions.
-=== MOB Optimization Tasks
+## MOB Optimization Tasks
-==== Further limiting write amplification
+### Further limiting write amplification
If your MOB workload has few to no updates or deletes then you can opt-in to MOB compactions that
optimize for limiting the amount of write amplification. It achieves this by setting a
@@ -253,38 +219,34 @@ through MOB compaction it will evaluate the size of the MOB file that currently
value and skip rewriting the value if that file is over threshold.
The bound of write amplification in this mode can be approximated as
-stem:["Write Amplification" = log_K(M/S)] where *K* is the number of files in compaction
-selection, *M* is the configurable threshold for MOB files size, and *S* is the minmum size of
+"Write Amplification" = $\log_{K}\!\left(\frac{M}{S}\right)$ where **K** is the number of files in compaction
+selection, **M** is the configurable threshold for MOB files size, and **S** is the minmum size of
memstore flushes that create MOB files in the first place. For example given 5 files picked up per
compaction, a threshold of 1 GB, and a flush size of 10MB the write amplification will be
-stem:[log_5((1GB)/(10MB)) = log_5(100) = 2.86].
+$\log_{5}\!\left(\frac{1\,\text{GB}}{10\,\text{MB}}\right) = \log_{5}(100) \approx 2.86$.
If we are using an underlying filesystem with a limitation on the number of files, such as HDFS,
and we know our expected data set size we can choose our maximum file size in order to approach
this limit but stay within it in order to minimize write amplification. For example, if we expect to
store a petabyte and we have a conservative limitation of a million files in our HDFS instance, then
-stem:[(1PB)/(1M) = 1GB] gives us a target limitation of a gigabyte per MOB file.
+$\frac{1\,\text{PB}}{1\,\text{M}} = 1\,\text{GB}$ gives us a target limitation of a gigabyte per MOB file.
To opt-in to this compaction mode you must set `hbase.mob.compaction.type` to `optimized`. The
default MOB size threshold in this mode is set to 1GB. It can be changed by setting
`hbase.mob.compactions.max.file.size` to a positive integer number of bytes.
-
-====
-[source,xml]
-----
+```xml
- hbase.mob.compaction.type
- optimized
- opt-in to write amplification optimized mob compaction.
+<property>
+  <name>hbase.mob.compaction.type</name>
+  <value>optimized</value>
+  <description>opt-in to write amplification optimized mob compaction.</description>
+</property>
- hbase.mob.compactions.max.file.size
- 10737418240
- Example of tuning the max mob file size to 10GB
+<property>
+  <name>hbase.mob.compactions.max.file.size</name>
+  <value>10737418240</value>
+  <description>Example of tuning the max mob file size to 10GB</description>
+</property>
-----
-====
+```
Additionally, when operating in this mode the compaction process will seek to avoid writing MOB
files that are over the max file threshold. As it is writing out additional MOB values into a MOB
@@ -293,18 +255,15 @@ When the hfile of MOB values reaches limit, the MOB hfile is committed to the MO
a new one is created. The hfile with reference cells will track the complete set of MOB hfiles it
needs in its metadata.
-.Be mindful of total time to complete compaction of a region
-[WARNING]
-====
-When using the write amplification optimized compaction mode you need to watch for the maximum time
-to compact a single region. If it nears an hour you should read through the troubleshooting section
-below <>. Failure to make the adjustments discussed there could
-lead to dataloss.
-====
-
-[[mob.cache.configure]]
-==== Configuring the MOB Cache
+
+ When using the write amplification optimized compaction mode you need to watch for the maximum
+ time to compact a single region. If it nears an hour you should read through the troubleshooting
+ section below [Adjusting the MOB cleaner's tolerance for new
+ hfiles](/docs/architecture/hbase-mob#adjusting-the-mob-cleaners-tolerance-for-new-hfiles). Failure
+ to make the adjustments discussed there could lead to data loss.
+
+### Configuring the MOB Cache
Because there can be a large number of MOB files at any time, as compared to the number of HFiles,
MOB files are not always kept open. The MOB file reader cache is a LRU cache which keeps the most
@@ -312,44 +271,42 @@ recently used MOB files open. To configure the MOB file reader's cache on each R
the following properties to the RegionServer's `hbase-site.xml`, customize the configuration to
suit your environment, and restart or rolling restart the RegionServer.
-.Example MOB Cache Configuration
-====
-[source,xml]
-----
+#### Example MOB Cache Configuration
+
+```xml
- hbase.mob.file.cache.size
- 1000
-
- Number of opened file handlers to cache.
- A larger value will benefit reads by providing more file handlers per mob
- file cache and would reduce frequent file opening and closing.
- However, if this is set too high, this could lead to a "too many opened file handers"
- The default value is 1000.
-
+<property>
+  <name>hbase.mob.file.cache.size</name>
+  <value>1000</value>
+  <description>
+    Number of opened file handlers to cache.
+    A larger value will benefit reads by providing more file handlers per mob
+    file cache and would reduce frequent file opening and closing.
+    However, if this is set too high, this could lead to a "too many opened file handlers"
+    error. The default value is 1000.
+  </description>
+</property>
- hbase.mob.cache.evict.period
- 3600
-
- The amount of time in seconds after which an unused file is evicted from the
- MOB cache. The default value is 3600 seconds.
-
+<property>
+  <name>hbase.mob.cache.evict.period</name>
+  <value>3600</value>
+  <description>
+    The amount of time in seconds after which an unused file is evicted from the
+    MOB cache. The default value is 3600 seconds.
+  </description>
+</property>
- hbase.mob.cache.evict.remain.ratio
- 0.5f
-
- A multiplier (between 0.0 and 1.0), which determines how many files remain cached
- after the threshold of files that remains cached after a cache eviction occurs
- which is triggered by reaching the `hbase.mob.file.cache.size` threshold.
- The default value is 0.5f, which means that half the files (the least-recently-used
- ones) are evicted.
-
+<property>
+  <name>hbase.mob.cache.evict.remain.ratio</name>
+  <value>0.5f</value>
+  <description>
+    A multiplier (between 0.0 and 1.0) which determines how many files remain cached
+    after an eviction is triggered by reaching the hbase.mob.file.cache.size threshold.
+    The default value is 0.5f, which means that half the files (the least-recently-used
+    ones) are evicted.
+  </description>
+</property>
-----
-====
+```
-==== Manually Compacting MOB Files
+### Manually Compacting MOB Files
To manually compact MOB files, rather than waiting for the
periodic chore to trigger compaction, use the
@@ -358,17 +315,16 @@ require the first argument to be the table name, and take a column
family as the second argument. If used with a column family that includes MOB data, then
these operator requests will result in the MOB data being compacted.
-----
+```bash
hbase> major_compact 't1'
-hbase> major_compact 't2', 'c1’
-----
+hbase> major_compact 't2', 'c1'
+```
This same request can be made via the `Admin.majorCompact` Java API.
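+
+For reference, a minimal Java sketch of the equivalent `Admin.majorCompact` calls (the table and column family names match the shell example above; error handling is omitted):
+
+```java
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class MajorCompactExample {
+  public static void main(String[] args) throws Exception {
+    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = connection.getAdmin()) {
+      // Whole-table major compaction, equivalent to: major_compact 't1'
+      admin.majorCompact(TableName.valueOf("t1"));
+      // Single column family, equivalent to: major_compact 't2', 'c1'
+      admin.majorCompact(TableName.valueOf("t2"), Bytes.toBytes("c1"));
+    }
+  }
+}
+```
+
+As with the shell, these calls only request the compaction; the work itself runs asynchronously on the RegionServers.
+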
-=== MOB Troubleshooting
+## MOB Troubleshooting
-[[mob.troubleshoot.cleaner.toonew]]
-==== Adjusting the MOB cleaner's tolerance for new hfiles
+### Adjusting the MOB cleaner's tolerance for new hfiles
The MOB cleaner chore ignores all MOB hfiles that were created more recently than an hour prior to
the start of the chore to ensure we don't miss the reference metadata from the corresponding regular
@@ -393,27 +349,24 @@ references it. Such a delay should not happen with a normally configured and hea
The cleaner's window for "too recent" is controlled by setting `hbase.mob.min.age.archive` to a
positive integer number of milliseconds.
-====
-[source,xml]
-----
+```xml
- hbase.mob.min.age.archive
- 86400000
- Example of tuning the cleaner to only archive files older than a day.
+<property>
+  <name>hbase.mob.min.age.archive</name>
+  <value>86400000</value>
+  <description>Example of tuning the cleaner to only archive files older than a day.</description>
+</property>
-----
-====
+```
-==== Retrieving MOB metadata through the HBase Shell
+### Retrieving MOB metadata through the HBase Shell
While working on troubleshooting failures in the MOB system you can retrieve some of the internal
information through the HBase shell by specifying special attributes on a scan.
-----
+```ruby
hbase(main):112:0> scan 'some_table', {STARTROW => '00012-example-row-key', LIMIT => 1,
hbase(main):113:1* CACHE_BLOCKS => false, ATTRIBUTES => { 'hbase.mob.scan.raw' => '1',
hbase(main):114:2* 'hbase.mob.scan.ref.only' => '1' } }
-----
+```
The MOB internal information is stored as four bytes for the size of the underlying cell value and
then a UTF8 string with the name of the MOB HFile that contains the underlying cell value. Note that
@@ -424,15 +377,15 @@ characters.
Let's look at a specific example:
-----
+```ruby
hbase(main):112:0> scan 'some_table', {STARTROW => '00012-example-row-key', LIMIT => 1,
hbase(main):113:1* CACHE_BLOCKS => false, ATTRIBUTES => { 'hbase.mob.scan.raw' => '1',
hbase(main):114:2* 'hbase.mob.scan.ref.only' => '1' } }
ROW COLUMN+CELL
00012-example-row-key column=foo:bar, timestamp=1511179764, value=\x00\x02|\x94d41d8cd98f00b204
- e9800998ecf8427e19700118ffd9c244fe69488bbc9f2c77d24a3e6a
+ e9800998ecf8427e19700118ffd9c244fe69488bbc9f2c77d24a3e6a
1 row(s) in 0.0130 seconds
-----
+```
In this case the first four bytes are `\x00\x02|\x94` which corresponds to the bytes
`[0x00, 0x02, 0x7C, 0x94]`. (Note that the third byte was printed as the ASCII character '|'.)
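+
+If you need to decode such a reference value programmatically, a minimal sketch along the following lines should work. The `decode` helper is illustrative only and not part of the HBase API; it simply applies the layout described above (a four-byte big-endian size followed by the UTF-8 hfile name).
+
+```java
+import java.nio.charset.StandardCharsets;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class MobReferenceDecoder {
+  // Illustrative helper: first four bytes = size of the original value,
+  // remaining bytes = name of the MOB hfile holding that value.
+  static void decode(byte[] referenceValue) {
+    int valueSize = Bytes.toInt(referenceValue, 0);            // 0x00027C94 -> 162964
+    String mobFileName = new String(referenceValue, Bytes.SIZEOF_INT,
+        referenceValue.length - Bytes.SIZEOF_INT, StandardCharsets.UTF_8);
+    System.out.println("original value size (bytes): " + valueSize);
+    System.out.println("MOB hfile name: " + mobFileName);
+  }
+}
+```
+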
@@ -450,13 +403,14 @@ and you'll either need to already know the lineage of your table or you'll need
tables.
Assuming you are authenticated as a user with HBase superuser rights, you can search for it:
-----
+
+```bash
$> hdfs dfs -find /hbase -name \
- d41d8cd98f00b204e9800998ecf8427e19700118ffd9c244fe69488bbc9f2c77d24a3e6a
+d41d8cd98f00b204e9800998ecf8427e19700118ffd9c244fe69488bbc9f2c77d24a3e6a
/hbase/mobdir/data/default/some_table/372c1b27e3dc0b56c3a031926e5efbe9/foo/d41d8cd98f00b204e9800998ecf8427e19700118ffd9c244fe69488bbc9f2c77d24a3e6a
-----
+```
-==== Moving a column family out of MOB
+### Moving a column family out of MOB
If you want to disable MOB on a column family you must ensure you instruct HBase to migrate the data
out of the MOB system prior to turning the feature off. If you fail to do this HBase will return the
@@ -467,68 +421,99 @@ The following procedure will safely migrate the underlying data without requirin
Clients will see a number of retries when configuration settings are applied and regions are
reloaded.
-.Procedure: Stop MOB maintenance, change MOB threshold, rewrite data via compaction
-. Ensure the MOB compaction chore in the Master is off by setting
+#### Procedure: Stop MOB maintenance, change MOB threshold, rewrite data via compaction
+
+Ensure the MOB compaction chore in the Master is off by setting
`hbase.mob.compaction.chore.period` to `0`. Applying this configuration change will require a
rolling restart of HBase Masters. That will require at least one fail-over of the active master,
which may cause retries for clients doing HBase administrative operations.
-. Ensure no MOB compactions are issued for the table via the HBase shell for the duration of this
+
+Ensure no MOB compactions are issued for the table via the HBase shell for the duration of this
migration.
-. Use the HBase shell to change the MOB size threshold for the column family you are migrating to a
+
+##### Change the MOB size threshold
+
+Use the HBase shell to change the MOB size threshold for the column family you are migrating to a
value that is larger than the largest cell present in the column family. E.g. given a table named
'some_table' and a column family named 'foo' we can pick one gigabyte as an arbitrary "bigger than
what we store" value:
-+
-----
-hbase(main):011:0> alter 'some_table', {NAME => 'foo', MOB_THRESHOLD => '1000000000'}
-Updating all regions with the new schema...
-9/25 regions updated.
-25/25 regions updated.
-Done.
-0 row(s) in 3.4940 seconds
-----
-+
-Note that if you are still ingesting data you must ensure this threshold is larger than any cell
-value you might write; MAX_INT would be a safe choice.
-
-. Perform a major compaction on the table. Specifically you are performing a "normal" compaction and
-not a MOB compaction.
-+
-----
+
+```ruby
+ hbase(main):011:0> alter 'some_table', {NAME => 'foo', MOB_THRESHOLD => '1000000000'}
+ Updating all regions with the new schema...
+ 9/25 regions updated.
+ 25/25 regions updated.
+ Done.
+ 0 row(s) in 3.4940 seconds
+```
+
+Note that if you are still ingesting data you must ensure this threshold is larger than any cell value you might write; MAX_INT would be a safe choice.
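+
+If you are scripting the migration, a hedged Java sketch of the same threshold change might look like the following; the table and family names mirror the shell example, and you should verify the `Admin` calls against the javadoc for your release.
+
+```java
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class RaiseMobThreshold {
+  public static void main(String[] args) throws Exception {
+    TableName table = TableName.valueOf("some_table");
+    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = connection.getAdmin()) {
+      ColumnFamilyDescriptor current =
+          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("foo"));
+      ColumnFamilyDescriptor raised = ColumnFamilyDescriptorBuilder.newBuilder(current)
+          .setMobThreshold(1_000_000_000L)   // "bigger than anything we store"
+          .build();
+      admin.modifyColumnFamily(table, raised);  // rolls the change out to all regions
+    }
+  }
+}
+```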
+
+##### Perform a major compaction on the table
+
+Specifically you are performing a "normal" compaction and not a MOB compaction.
+
+```ruby
hbase(main):012:0> major_compact 'some_table'
0 row(s) in 0.2600 seconds
-----
+```
+
+##### Monitor for the end of the major compaction
+
+Since compaction is handled asynchronously you'll need to use the shell to first see the compaction start and then see it end.
-. Monitor for the end of the major compaction. Since compaction is handled asynchronously you'll
-need to use the shell to first see the compaction start and then see it end.
-+
HBase should first say that a "MAJOR" compaction is happening.
-+
-----
+
+```ruby
hbase(main):015:0> @hbase.admin(@formatter).instance_eval do
hbase(main):016:1* p @admin.get_compaction_state('some_table').to_string
hbase(main):017:2* end
-“MAJOR”
-----
-+
+"MAJOR"
+```
+
When the compaction has finished the result should print out "NONE".
-+
-----
+
+```ruby
hbase(main):015:0> @hbase.admin(@formatter).instance_eval do
hbase(main):016:1* p @admin.get_compaction_state('some_table').to_string
hbase(main):017:2* end
-“NONE”
-----
-. Run the _mobrefs_ utility to ensure there are no MOB cells. Specifically, the tool will launch a
+"NONE"
+```
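+
+If you would rather monitor from Java than from the shell, a small polling sketch over `Admin.getCompactionState` does the same job; the ten-second interval below is arbitrary.
+
+```java
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.CompactionState;
+
+public class WaitForMajorCompaction {
+  // Blocks until the table reports no compaction activity (state NONE).
+  static void waitForCompaction(Admin admin, TableName table) throws Exception {
+    while (admin.getCompactionState(table) != CompactionState.NONE) {
+      Thread.sleep(10_000);
+    }
+  }
+}
+```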
+
+Run the _mobrefs_ utility to ensure there are no MOB cells. Specifically, the tool will launch a
Hadoop MapReduce job that will show a job counter of 0 input records when we've successfully
rewritten all of the data.
-+
-----
+
+```bash
$> HADOOP_CLASSPATH=/etc/hbase/conf:$(hbase mapredcp) yarn jar \
- /some/path/to/hbase-shaded-mapreduce.jar mobrefs mobrefs-report-output some_table foo
+/some/path/to/hbase-shaded-mapreduce.jar mobrefs mobrefs-report-output some_table foo
...
19/12/10 11:38:47 INFO impl.YarnClientImpl: Submitted application application_1575695902338_0004
-19/12/10 11:38:47 INFO mapreduce.Job: The url to track the job: https://rm-2.example.com:8090/proxy/application_1575695902338_0004/
+19/12/10 11:38:47 INFO mapreduce.Job: The url to track the job: https://rm-2.example.com:8090/proxy/application_1575695902338_0004/
19/12/10 11:38:47 INFO mapreduce.Job: Running job: job_1575695902338_0004
19/12/10 11:38:57 INFO mapreduce.Job: Job job_1575695902338_0004 running in uber mode : false
19/12/10 11:38:57 INFO mapreduce.Job: map 0% reduce 0%
@@ -544,21 +529,21 @@ $> HADOOP_CLASSPATH=/etc/hbase/conf:$(hbase mapredcp) yarn jar \
19/12/10 11:39:35 INFO mapreduce.Job: Job job_1575695902338_0004 completed successfully
19/12/10 11:39:35 INFO mapreduce.Job: Counters: 54
...
- Map-Reduce Framework
- Map input records=0
+ Map-Reduce Framework
+ Map input records=0
...
19/12/09 22:41:28 INFO mapreduce.MobRefReporter: Finished creating report for 'some_table', family='foo'
-----
-+
+```
+
If the data has not successfully been migrated out, this report will show both a non-zero number
of input records and a count of mob cells.
-+
-----
+
+```bash
$> HADOOP_CLASSPATH=/etc/hbase/conf:$(hbase mapredcp) yarn jar \
- /some/path/to/hbase-shaded-mapreduce.jar mobrefs mobrefs-report-output some_table foo
+/some/path/to/hbase-shaded-mapreduce.jar mobrefs mobrefs-report-output some_table foo
...
19/12/10 11:44:18 INFO impl.YarnClientImpl: Submitted application application_1575695902338_0005
-19/12/10 11:44:18 INFO mapreduce.Job: The url to track the job: https://busbey-2.gce.cloudera.com:8090/proxy/application_1575695902338_0005/
+19/12/10 11:44:18 INFO mapreduce.Job: The url to track the job: https://busbey-2.gce.cloudera.com:8090/proxy/application_1575695902338_0005/
19/12/10 11:44:18 INFO mapreduce.Job: Running job: job_1575695902338_0005
19/12/10 11:44:26 INFO mapreduce.Job: Job job_1575695902338_0005 running in uber mode : false
19/12/10 11:44:26 INFO mapreduce.Job: map 0% reduce 0%
@@ -574,109 +559,127 @@ $> HADOOP_CLASSPATH=/etc/hbase/conf:$(hbase mapredcp) yarn jar \
19/12/10 11:45:00 INFO mapreduce.Job: Job job_1575695902338_0005 completed successfully
19/12/10 11:45:00 INFO mapreduce.Job: Counters: 54
...
- Map-Reduce Framework
- Map input records=1
+ Map-Reduce Framework
+ Map input records=1
...
- MOB
- NUM_CELLS=1
+ MOB
+ NUM_CELLS=1
...
19/12/10 11:45:00 INFO mapreduce.MobRefReporter: Finished creating report for 'some_table', family='foo'
-----
-+
-If this happens you should verify that MOB compactions are disabled, verify that you have picked
-a sufficiently large MOB threshold, and redo the major compaction step.
-. When the _mobrefs_ report shows that no more data is stored in the MOB system then you can safely
-alter the column family configuration so that the MOB feature is disabled.
-+
-----
+```
+
+If this happens you should verify that MOB compactions are disabled, verify that you have picked a sufficiently large MOB threshold, and redo the major compaction step.
+
+##### Disable the MOB feature for the column family
+
+When the _mobrefs_ report shows that no more data is stored in the MOB system then you can safely alter the column family configuration so that the MOB feature is disabled.
+
+```ruby
hbase(main):017:0> alter 'some_table', {NAME => 'foo', IS_MOB => 'false'}
Updating all regions with the new schema...
8/25 regions updated.
25/25 regions updated.
Done.
0 row(s) in 2.9370 seconds
-----
-. After the column family no longer shows the MOB feature enabled, it is safe to start MOB
-maintenance chores again. You can allow the default to be used for
-`hbase.mob.compaction.chore.period` by removing it from your configuration files or restore
-it to whatever custom value you had prior to starting this process.
-. Once the MOB feature is disabled for the column family there will be no internal HBase process
+```
+
+The MOB feature is fully disabled on a column family only after altering the column family and performing a major compaction. Until that major compaction has been performed, the MOB cells will still be present in the MOB storage.
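+
+If you are scripting this step, a minimal Java sketch of the same `IS_MOB => 'false'` alteration (table and family names match the shell example above):
+
+```java
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class DisableMobOnFamily {
+  // Java equivalent of: alter 'some_table', {NAME => 'foo', IS_MOB => 'false'}
+  static void disableMob(Admin admin) throws Exception {
+    TableName table = TableName.valueOf("some_table");
+    ColumnFamilyDescriptor current =
+        admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("foo"));
+    admin.modifyColumnFamily(table,
+        ColumnFamilyDescriptorBuilder.newBuilder(current).setMobEnabled(false).build());
+  }
+}
+```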
+
+After the column family no longer shows the MOB feature enabled, it is safe to start MOB maintenance chores again. You can allow the default to be used for `hbase.mob.compaction.chore.period` by removing it from your configuration files or restore it to whatever custom value you had prior to starting this process.
+
+##### Clean up residual MOB data
+
+Once the MOB feature is disabled for the column family there will be no internal HBase process
looking for data in the MOB storage area specific to this column family. There will still be data
present there from prior to the compaction process that rewrote the values into HBase's data area.
You can check for this residual data directly in HDFS as an HBase superuser.
-+
-----
+
+```bash
$ hdfs dfs -count /hbase/mobdir/data/default/some_table
4 54 9063269081 /hbase/mobdir/data/default/some_table
-----
-+
-This data is spurious and may be reclaimed. You should sideline it, verify your application’s view
-of the table, and then delete it.
+```
+
+This data is spurious and may be reclaimed. You should sideline it, verify your application's view of the table, and then delete it.
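+
+If you want to automate this check, a small sketch against the Hadoop `FileSystem` API returns the same counts as `hdfs dfs -count`; the path below assumes the default `/hbase` root directory and the example table name used throughout this section.
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+
+public class CheckResidualMobData {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    // Residual MOB data for 'some_table' lives under the table's mobdir area.
+    Path mobDir = new Path("/hbase/mobdir/data/default/some_table");
+    FileSystem fs = FileSystem.get(conf);
+    ContentSummary summary = fs.getContentSummary(mobDir);
+    System.out.println("files: " + summary.getFileCount()
+        + ", bytes: " + summary.getLength());
+  }
+}
+```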
+
-==== Data values over than the MOB threshold show up stored in non-MOB hfiles
+### Data values over the MOB threshold show up stored in non-MOB hfiles
Bulk load and WAL split-to-HFile don't consider the MOB threshold and write data into normal hfiles (under the /hbase/data directory).
-[NOTE]
-This won't cause any functional problem, during next compaction such data will be written out to the MOB hfiles.
+
+ This won't cause any functional problem; during the next compaction such data will be written out to
+ the MOB hfiles.
+
-=== MOB Upgrade Considerations
+## MOB Upgrade Considerations
Generally, data stored using the MOB feature should transparently continue to work correctly across
HBase upgrades.
-==== Upgrading to a version with the "distributed MOB compaction" feature
+### Upgrading to a version with the "distributed MOB compaction" feature
Prior to the work in HBASE-22749, "Distributed MOB compactions", HBase had the Master coordinate all
compaction maintenance of the MOB hfiles. Centralizing management of the MOB data allowed for space
optimizations but safely coordinating that management with Region Servers resulted in edge cases that
-caused data loss (ref link:https://issues.apache.org/jira/browse/HBASE-22075[HBASE-22075]).
+caused data loss (ref [HBASE-22075](https://issues.apache.org/jira/browse/HBASE-22075)).
Users of the MOB feature upgrading to a version of HBase that includes HBASE-22749 should be aware
of the following changes:
-* The MOB system no longer allows setting "MOB Compaction Policies"
-* The MOB system no longer attempts to group MOB values by the date of the original cell's timestamp
+- The MOB system no longer allows setting "MOB Compaction Policies"
+- The MOB system no longer attempts to group MOB values by the date of the original cell's timestamp
according to said compaction policies, daily or otherwise
-* The MOB system no longer needs to track individual cell deletes through the use of special
+- The MOB system no longer needs to track individual cell deletes through the use of special
files in the MOB storage area with the suffix `_del`. After upgrading you should sideline these
files.
-* Under default configuration the MOB system should take much less time to perform a compaction of
+- Under default configuration the MOB system should take much less time to perform a compaction of
MOB stored values. This is a direct consequence of the fact that HBase will place a much larger
load on the underlying filesystem when doing compactions of MOB stored values; the additional load
should be a multiple on the order of the number of region servers. I.e. for a cluster
with three region servers and two masters the default configuration should have HBase put three
times the load on HDFS during major compactions that rewrite MOB data when compared to Master
handled MOB compaction; it should also be approximately three times as fast.
-* When the MOB system detects that a table has hfiles with references to MOB data but the reference
+- When the MOB system detects that a table has hfiles with references to MOB data but the reference
hfiles do not yet have the needed file level metadata (i.e. from use of the MOB feature prior to
HBASE-22749) then it will refuse to archive _any_ MOB hfiles from that table. The normal course of
periodic compactions done by Region Servers will update existing hfiles with MOB references, but
until a given table has been through the needed compactions operators should expect to see an
increased amount of storage used by the MOB feature.
-* Performing a compaction with type "MOB" no longer has special handling to compact specifically the
+- Performing a compaction with type "MOB" no longer has special handling to compact specifically the
MOB hfiles. Instead it will issue a warning and do a compaction of the table. For example using
the HBase shell as follows will result in a warning in the Master logs followed by a major
compaction of the 'example' table in its entirety or for the 'big' column respectively.
-+
-----
-hbase> major_compact 'example', nil, 'MOB'
-hbase> major_compact 'example', 'big', 'MOB'
-----
-+
-The same is true for directly using the Java API for
-`admin.majorCompact(TableName.valueOf("example"), CompactType.MOB)`.
-* Similarly, manually performing a major compaction on a table or region will also handle compacting
+ ```ruby
+ hbase> major_compact 'example', nil, 'MOB'
+ hbase> major_compact 'example', 'big', 'MOB'
+ ```
+ The same is true for directly using the Java API for `admin.majorCompact(TableName.valueOf("example"), CompactType.MOB)`.
+- Similarly, manually performing a major compaction on a table or region will also handle compacting
the MOB stored values for that table or region respectively.
The following configuration setting has been deprecated and replaced:
-* `hbase.master.mob.ttl.cleaner.period` has been replaced with `hbase.master.mob.cleaner.period`
+- `hbase.master.mob.ttl.cleaner.period` has been replaced with `hbase.master.mob.cleaner.period`
The following configuration settings are no longer used:
-* `hbase.mob.compaction.mergeable.threshold`
-* `hbase.mob.delfile.max.count`
-* `hbase.mob.compaction.batch.size`
-* `hbase.mob.compactor.class`
-* `hbase.mob.compaction.threads.max`
+- `hbase.mob.compaction.mergeable.threshold`
+- `hbase.mob.delfile.max.count`
+- `hbase.mob.compaction.batch.size`
+- `hbase.mob.compactor.class`
+- `hbase.mob.compaction.threads.max`
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/hdfs.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/hdfs.mdx
new file mode 100644
index 000000000000..d5e36ab9959c
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/hdfs.mdx
@@ -0,0 +1,16 @@
+---
+title: "HDFS"
+description: "How HBase leverages HDFS for distributed storage, including NameNode and DataNode architecture and file replication."
+---
+
+As HBase runs on HDFS (and each StoreFile is written as a file on HDFS), it is important to have an understanding of the HDFS Architecture especially in terms of how it stores files, handles failovers, and replicates blocks.
+
+See the Hadoop documentation on [HDFS Architecture](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) for more information.
+
+## NameNode [#hdfs-namenode]
+
+The NameNode is responsible for maintaining the filesystem metadata. See the above HDFS Architecture link for more information.
+
+## DataNode [#hdfs-datanode]
+
+The DataNodes are responsible for storing HDFS blocks. See the above HDFS Architecture link for more information.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/index.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/index.mdx
new file mode 100644
index 000000000000..66deec76e9da
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/index.mdx
@@ -0,0 +1,10 @@
+---
+title: "Architecture"
+description: "Comprehensive guide to HBase architecture including client-server model, regions, WAL, compaction, and advanced features."
+---
+
+## Resources [#architecture-resources]
+
+1. More information about the design and implementation can be found at the jira issue: [HBASE-10070](https://issues.apache.org/jira/browse/HBASE-10070)
+
+2. HBaseCon 2014 talk: [HBase Read High Availability Using Timeline-Consistent Region Replicas](https://hbase.apache.org/www.hbasecon.com/#2014-PresentationsRecordings) also contains some details and [slides](http://www.slideshare.net/enissoz/hbase-high-availability-for-reads-with-time).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/master.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/master.mdx
new file mode 100644
index 000000000000..fff948ad1b0b
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/master.mdx
@@ -0,0 +1,95 @@
+---
+title: "Master"
+description: "HBase Master server responsibilities including RegionServer monitoring, metadata operations, load balancing, and failover behavior."
+---
+
+`HMaster` is the implementation of the Master Server. The Master server is responsible for monitoring all RegionServer instances in the cluster, and is the interface for all metadata changes. In a distributed cluster, the Master typically runs on the [NameNode](/docs/architecture/hdfs#hdfs-namenode). J Mohamed Zahoor goes into some more detail on the Master Architecture in this blog posting, [HBase HMaster Architecture](https://web.archive.org/web/20191211053128/http://blog.zahoor.in/2012/08/hbase-hmaster-architecture/).
+
+## Startup Behavior
+
+If run in a multi-Master environment, all Masters compete to run the cluster. If the active Master loses its lease in ZooKeeper (or the Master shuts down), then the remaining Masters jostle to take over the Master role.
+
+## Runtime Impact
+
+A common dist-list question involves what happens to an HBase cluster when the Master goes down. This information has changed starting 3.0.0.
+
+### Up until releases 2.x.y
+
+Because the HBase client talks directly to the RegionServers, the cluster can still function in a "steady state". Additionally, per [Catalog Tables](/docs/architecture/catalog-tables), `hbase:meta` exists as an HBase table and is not resident in the Master. However, the Master controls critical functions such as RegionServer failover and completing region splits. So while the cluster can still run for a short time without the Master, the Master should be restarted as soon as possible.
+
+### Starting with release 3.0.0
+
+As mentioned in section [Master Registry (new as of 2.3.0)](/docs/architecture/client#masterregistry-rpc-hedging), the default connection registry for clients is now based on master rpc end points. Hence the requirements for masters' uptime are even tighter starting this release.
+
+- At least one active or standby master is needed for connection set up, unlike before when all the clients needed was a ZooKeeper ensemble.
+- Master is now in the critical path for read/write operations. For example, if the meta region bounces off to a different region server, clients need the master to fetch the new locations. Earlier this was done by fetching this information directly from ZooKeeper.
+- Masters will now have higher connection load than before. So, the server side configuration might need adjustment depending on the load.
+
+Overall, the master uptime requirements, when this feature is enabled, are even higher for the client operations to go through.
+
+## Interface [#architecture-master-interface]
+
+The methods exposed by `HMasterInterface` are primarily metadata-oriented methods:
+
+- Table (createTable, modifyTable, removeTable, enable, disable)
+- ColumnFamily (addColumn, modifyColumn, removeColumn)
+- Region (move, assign, unassign)
+
+For example, when the `Admin` method `disableTable` is invoked, it is serviced by the Master server.
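+
+A minimal client-side sketch of such a call (the table name `example` is a placeholder); the RPC is serviced by the active Master rather than by a RegionServer:
+
+```java
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+
+public class DisableTableExample {
+  public static void main(String[] args) throws Exception {
+    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = connection.getAdmin()) {
+      // Metadata operation: handled by the Master, not by the RegionServers.
+      admin.disableTable(TableName.valueOf("example"));
+    }
+  }
+}
+```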
+
+## Processes [#architecture-master-processes]
+
+The Master runs several background threads:
+
+### LoadBalancer
+
+Periodically, and when there are no regions in transition, a load balancer will run and move regions around to balance the cluster's load. See [Balancer](/docs/configuration/important#balancer) for configuring this property.
+
+See [Region-RegionServer Assignment](/docs/architecture/regions#region-regionserver-assignment) for more information on region assignment.
+
+### CatalogJanitor
+
+Periodically checks and cleans up the `hbase:meta` table. See [hbase:meta](/docs/architecture/catalog-tables#hbasemeta) for more information on the meta table.
+
+## MasterProcWAL
+
+_MasterProcWAL is replaced in hbase-2.3.0 by an alternate Procedure Store implementation; see [in-master-procedure-store-region](/docs/upgrading/paths#new-in-master-procedure-store). This section pertains to hbase-2.0.0 through hbase-2.2.x_
+
+HMaster records administrative operations and their running states, such as the handling of a crashed server, table creation, and other DDLs, into a Procedure Store. The Procedure Store WALs are stored under the MasterProcWALs directory. The Master WALs are not like RegionServer WALs. Keeping up the Master WAL allows us to run a state machine that is resilient across Master failures. For example, if an HMaster that was in the middle of creating a table encounters an issue and fails, the next active HMaster can take up where the previous left off and carry the operation to completion. Since hbase-2.0.0, a new AssignmentManager (A.K.A AMv2) was introduced and the HMaster handles region assignment operations, server crash processing, balancing, etc., all via AMv2 persisting all state and transitions into MasterProcWALs rather than up into ZooKeeper, as we do in hbase-1.x.
+
+See [AMv2 Description for Devs](/docs/amv2) (and [Procedure Framework (Pv2): HBASE-12439](/docs/pv2) for its basis) if you would like to learn more about the new AssignmentManager.
+
+### Configurations for MasterProcWAL
+
+Here is the list of configurations that affect MasterProcWAL operation. You should not have to change your defaults.
+
+- **`hbase.procedure.store.wal.periodic.roll.msec`**
+ **Description:** Frequency of generating a new WAL
+ **Default:** `1h (3600000 in msec)`
+
+- **`hbase.procedure.store.wal.roll.threshold`**
+ **Description:** Threshold in size before the WAL rolls. Every time the WAL reaches this size, or the above period (1 hour) passes since the last log roll, the HMaster will generate a new WAL.
+ **Default:** `32MB (33554432 in byte)`
+
+- **`hbase.procedure.store.wal.warn.threshold`**
+ **Description:** If the number of WALs goes beyond this threshold, the following message should appear in the HMaster log with WARN level when rolling.
+
+ ```
+ procedure WALs count=xx above the warning threshold 64. check running procedures to see if something is stuck.
+ ```
+
+ **Default:** `64`
+
+- **`hbase.procedure.store.wal.max.retries.before.roll`**
+ **Description:** Max number of retries when syncing slots (records) to the underlying storage, such as HDFS. On every attempt, the following message should appear in the HMaster log.
+
+ ```
+ unable to sync slots, retry=xx
+ ```
+
+ **Default:** `3`
+
+- **`hbase.procedure.store.wal.sync.failure.roll.max`**
+ **Description:** After the above 3 retries, the log is rolled and the retry count is reset to 0, whereupon a new set of retries starts. This configuration controls the max number of attempts of log rolling upon sync failure. That is, HMaster is allowed to fail to sync 9 times in total. Once it exceeds that, the following log should appear in the HMaster log.
+ ```
+ Sync slots after log roll failed, abort.
+ ```
+ **Default:** `3`
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/meta.json b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/meta.json
new file mode 100644
index 000000000000..912ceb161d3e
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/meta.json
@@ -0,0 +1,17 @@
+{
+ "title": "Architecture",
+ "pages": [
+ "overview",
+ "catalog-tables",
+ "client",
+ "client-request-filters",
+ "master",
+ "regionserver",
+ "regions",
+ "bulk-loading",
+ "hdfs",
+ "timeline-consistent-reads",
+ "hbase-mob",
+ "snapshot-scanner"
+ ]
+}
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/overview.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/overview.mdx
new file mode 100644
index 000000000000..f0daf27f84fd
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/overview.mdx
@@ -0,0 +1,36 @@
+---
+title: "Overview"
+description: "Introduction to HBase as a NoSQL distributed database, key features, scalability characteristics, and when to use HBase."
+---
+
+## NoSQL?
+
+HBase is a type of "NoSQL" database. "NoSQL" is a general term meaning that the database isn't an RDBMS which supports SQL as its primary access language, but there are many types of NoSQL databases: BerkeleyDB is an example of a local NoSQL database, whereas HBase is very much a distributed database. Technically speaking, HBase is really more a "Data Store" than "Data Base" because it lacks many of the features you find in an RDBMS, such as typed columns, secondary indexes, triggers, and advanced query languages, etc.
+
+However, HBase has many features which support both linear and modular scaling. HBase clusters expand by adding RegionServers that are hosted on commodity class servers. If a cluster expands from 10 to 20 RegionServers, for example, it doubles both in terms of storage and processing capacity. An RDBMS can scale well, but only up to a point - specifically, the size of a single database server - and for the best performance requires specialized hardware and storage devices. HBase features of note are:
+
+- Strongly consistent reads/writes: HBase is not an "eventually consistent" DataStore. This makes it very suitable for tasks such as high-speed counter aggregation.
+- Automatic sharding: HBase tables are distributed on the cluster via regions, and regions are automatically split and re-distributed as your data grows.
+- Automatic RegionServer failover
+- Hadoop/HDFS Integration: HBase supports HDFS out of the box as its distributed file system.
+- MapReduce: HBase supports massively parallelized processing via MapReduce for using HBase as both source and sink.
+- Java Client API: HBase supports an easy to use Java API for programmatic access.
+- Thrift/REST API: HBase also supports Thrift and REST for non-Java front-ends.
+- Block Cache and Bloom Filters: HBase supports a Block Cache and Bloom Filters for high volume query optimization.
+- Operational Management: HBase provides built-in web pages for operational insight as well as JMX metrics.
+
+## When Should I Use HBase? [#architecture-overview-when-should-i-use-hbase]
+
+HBase isn't suitable for every problem.
+
+First, make sure you have enough data. If you have hundreds of millions or billions of rows, then HBase is a good candidate. If you only have a few thousand/million rows, then using a traditional RDBMS might be a better choice due to the fact that all of your data might wind up on a single node (or two) and the rest of the cluster may be sitting idle.
+
+Second, make sure you can live without all the extra features that an RDBMS provides (e.g., typed columns, secondary indexes, transactions, advanced query languages, etc.) An application built against an RDBMS cannot be "ported" to HBase by simply changing a JDBC driver, for example. Consider moving from an RDBMS to HBase as a complete redesign as opposed to a port.
+
+Third, make sure you have enough hardware. Even HDFS doesn't do well with anything less than 5 DataNodes (due to things such as HDFS block replication which has a default of 3), plus a NameNode.
+
+HBase can run quite well stand-alone on a laptop - but this should be considered a development configuration only.
+
+## What Is The Difference Between HBase and Hadoop/HDFS?
+
+[HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) is a distributed file system that is well suited for the storage of large files. Its documentation states that it is not, however, a general purpose file system, and does not provide fast individual record lookups in files. HBase, on the other hand, is built on top of HDFS and provides fast record lookups (and updates) for large tables. This can sometimes be a point of conceptual confusion. HBase internally puts your data in indexed "StoreFiles" that exist on HDFS for high-speed lookups. See the [Data Model](/docs/datamodel) and the rest of this chapter for more information on how HBase achieves its goals.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/regions.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/regions.mdx
new file mode 100644
index 000000000000..fa483a101470
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/regions.mdx
@@ -0,0 +1,841 @@
+---
+title: "Regions"
+description: "Understanding HBase regions, stores, memstore, write-ahead log (WAL), compaction, splits, and region management strategies."
+---
+
+Regions are the basic element of availability and distribution for tables, and are comprised of a Store per Column Family. The hierarchy of objects is as follows:
+
+```
+Table (HBase table)
+ Region (Regions for the table)
+ Store (Store per ColumnFamily for each Region for the table)
+ MemStore (MemStore for each Store for each Region for the table)
+ StoreFile (StoreFiles for each Store for each Region for the table)
+ Block (Blocks within a StoreFile within a Store for each Region for the table)
+```
+
+For a description of what HBase files look like when written to HDFS, see [Browsing HDFS for HBase Objects](/docs/troubleshooting#browsing-hdfs-for-hbase-objects).
+
+## Considerations for Number of Regions
+
+In general, HBase is designed to run with a small (20-200) number of relatively large (5-20Gb) regions per server. The considerations for this are as follows:
+
+### Why should I keep my Region count low?
+
+Typically you want to keep your region count low on HBase for numerous reasons. Usually right around 100 regions per RegionServer has yielded the best results. Here are some of the reasons below for keeping region count low:
+
+1. MSLAB (MemStore-local allocation buffer) requires 2MB per MemStore (that's 2MB per family per region). 1000 regions that have 2 families each is 3.9GB of heap used, and it's not even storing data yet. NB: the 2MB value is configurable.
+2. If you fill all the regions at somewhat the same rate, the global memory usage makes it that it forces tiny flushes when you have too many regions which in turn generates compactions. Rewriting the same data tens of times is the last thing you want. An example is filling 1000 regions (with one family) equally and let's consider a lower bound for global MemStore usage of 5GB (the region server would have a big heap). Once it reaches 5GB it will force flush the biggest region, at that point they should almost all have about 5MB of data so it would flush that amount. 5MB inserted later, it would flush another region that will now have a bit over 5MB of data, and so on. This is currently the main limiting factor for the number of regions; see [Number of regions per RS - upper bound](/docs/operational-management/region-and-capacity#number-of-regions-per-rs---upper-bound) for detailed formula.
+3. The master as is is allergic to tons of regions, and will take a lot of time assigning them and moving them around in batches. The reason is that it's heavy on ZK usage, and it's not very async at the moment (could really be improved — and has been improved a bunch in 0.96 HBase).
+4. In older versions of HBase (pre-HFile v2, 0.90 and previous), tons of regions on a few RS can cause the store file index to rise, increasing heap usage and potentially creating memory pressure or OOME on the RSs
+
+Another issue is the effect of the number of regions on MapReduce jobs; it is typical to have one mapper per HBase region. Thus, hosting only 5 regions per RS may not be enough to get sufficient number of tasks for a MapReduce job, while 1000 regions will generate far too many tasks.
+
+See [Determining region count and size](/docs/operational-management/region-and-capacity#determining-region-count-and-size) for configuration guidelines.
+
+## Region-RegionServer Assignment
+
+This section describes how Regions are assigned to RegionServers.
+
+### Startup
+
+When HBase starts regions are assigned as follows (short version):
+
+1. The Master invokes the `AssignmentManager` upon startup.
+2. The `AssignmentManager` looks at the existing region assignments in `hbase:meta`.
+3. If the region assignment is still valid (i.e., if the RegionServer is still online) then the assignment is kept.
+4. If the assignment is invalid, then the `LoadBalancerFactory` is invoked to assign the region. The load balancer (`StochasticLoadBalancer` by default in HBase 1.0) assigns the region to a RegionServer.
+5. `hbase:meta` is updated with the RegionServer assignment (if needed) and the RegionServer start codes (start time of the RegionServer process) upon region opening by the RegionServer.
+
+### Failover
+
+When a RegionServer fails:
+
+1. The regions immediately become unavailable because the RegionServer is down.
+2. The Master will detect that the RegionServer has failed.
+3. The region assignments will be considered invalid and will be re-assigned just like the startup sequence.
+4. In-flight queries are re-tried, and not lost.
+5. Operations are switched to a new RegionServer within the following amount of time:
+ ```text
+ ZooKeeper session timeout + split time + assignment/replay time
+ ```
+
+### Region Load Balancing
+
+Regions can be periodically moved by the [LoadBalancer](/docs/architecture/master#loadbalancer).
+
+### Region State Transition
+
+HBase maintains a state for each region and persists the state in `hbase:meta`. The state of the `hbase:meta` region itself is persisted in ZooKeeper. You can see the states of regions in transition in the Master web UI. Following is the list of possible region states.
+
+**Possible Region States:**
+
+- `OFFLINE`: the region is offline and not opening
+- `OPENING`: the region is in the process of being opened
+- `OPEN`: the region is open and the RegionServer has notified the master
+- `FAILED_OPEN`: the RegionServer failed to open the region
+- `CLOSING`: the region is in the process of being closed
+- `CLOSED`: the RegionServer has closed the region and notified the master
+- `FAILED_CLOSE`: the RegionServer failed to close the region
+- `SPLITTING`: the RegionServer notified the master that the region is splitting
+- `SPLIT`: the RegionServer notified the master that the region has finished splitting
+- `SPLITTING_NEW`: this region is being created by a split which is in progress
+- `MERGING`: the RegionServer notified the master that this region is being merged with another region
+- `MERGED`: the RegionServer notified the master that this region has been merged
+- `MERGING_NEW`: this region is being created by a merge of two regions
+
+
+_(Figure: region state transitions)_
+
+**Graph Legend:**
+
+- Brown: Offline state, a special state that can be transient (after closed before opening), terminal (regions of disabled tables), or initial (regions of newly created tables)
+- Palegreen: Online state that regions can serve requests
+- Lightblue: Transient states
+- Red: Failure states that need OPS attention
+- Gold: Terminal states of regions split/merged
+- Grey: Initial states of regions created through split/merge
+
+**Transition State Descriptions:**
+
+1. The master moves a region from `OFFLINE` to `OPENING` state and tries to assign the region to a RegionServer. The RegionServer may or may not have received the open region request. The master retries sending the open region request to the RegionServer until the RPC goes through or the master runs out of retries. After the RegionServer receives the open region request, the RegionServer begins opening the region.
+2. If the master is running out of retries, the master prevents the RegionServer from opening the region by moving the region to `CLOSING` state and trying to close it, even if the RegionServer is starting to open the region.
+3. After the RegionServer opens the region, it continues to try to notify the master until the master moves the region to `OPEN` state and notifies the RegionServer. The region is now open.
+4. If the RegionServer cannot open the region, it notifies the master. The master moves the region to `CLOSED` state and tries to open the region on a different RegionServer.
+5. If the master cannot open the region after a certain number of attempts, it moves the region to `FAILED_OPEN` state, and takes no further action until an operator intervenes from the HBase shell, or the server is dead.
+6. The master moves a region from `OPEN` to `CLOSING` state. The RegionServer holding the region may or may not have received the close region request. The master retries sending the close request to the server until the RPC goes through or the master runs out of retries.
+7. If the RegionServer is not online, or throws `NotServingRegionException`, the master moves the region to `OFFLINE` state and re-assigns it to a different RegionServer.
+8. If the RegionServer is online, but not reachable after the master runs out of retries, the master moves the region to `FAILED_CLOSE` state and takes no further action until an operator intervenes from the HBase shell, or the server is dead.
+9. If the RegionServer gets the close region request, it closes the region and notifies the master. The master moves the region to `CLOSED` state and re-assigns it to a different RegionServer.
+10. Before assigning a region, the master moves the region to `OFFLINE` state automatically if it is in `CLOSED` state.
+11. When a RegionServer is about to split a region, it notifies the master. The master moves the region to be split from `OPEN` to `SPLITTING` state and adds the two new regions to be created to the RegionServer. These two regions are in `SPLITTING_NEW` state initially.
+12. After notifying the master, the RegionServer starts to split the region. Once past the point of no return, the RegionServer notifies the master again so the master can update the `hbase:meta` table. However, the master does not update the region states until it is notified by the server that the split is done. If the split is successful, the splitting region is moved from `SPLITTING` to `SPLIT` state and the two new regions are moved from `SPLITTING_NEW` to `OPEN` state.
+13. If the split fails, the splitting region is moved from `SPLITTING` back to `OPEN` state, and the two new regions which were created are moved from `SPLITTING_NEW` to `OFFLINE` state.
+14. When a RegionServer is about to merge two regions, it notifies the master first. The master moves the two regions to be merged from `OPEN` to `MERGING` state, and adds the new region which will hold the contents of the merged regions to the RegionServer. The new region is in `MERGING_NEW` state initially.
+15. After notifying the master, the RegionServer starts to merge the two regions. Once past the point of no return, the RegionServer notifies the master again so the master can update the META. However, the master does not update the region states until it is notified by the RegionServer that the merge has completed. If the merge is successful, the two merging regions are moved from `MERGING` to `MERGED` state and the new region is moved from `MERGING_NEW` to `OPEN` state.
+16. If the merge fails, the two merging regions are moved from `MERGING` back to `OPEN` state, and the new region which was created to hold the contents of the merged regions is moved from `MERGING_NEW` to `OFFLINE` state.
+17. For regions in `FAILED_OPEN` or `FAILED_CLOSE` states, the master tries to close them again when they are reassigned by an operator via HBase Shell.
+
+## Region-RegionServer Locality
+
+Over time, Region-RegionServer locality is achieved via HDFS block replication. The HDFS client does the following by default when choosing locations to write replicas:
+
+1. First replica is written to local node
+2. Second replica is written to a random node on another rack
+3. Third replica is written on the same rack as the second, but on a different node chosen randomly
+4. Subsequent replicas are written on random nodes on the cluster. See _Replica Placement: The First Baby Steps_ on this page: [HDFS Architecture](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)
+
+Thus, HBase eventually achieves locality for a region after a flush or a compaction. In a RegionServer failover situation a RegionServer may be assigned regions with non-local StoreFiles (because none of the replicas are local), however as new data is written in the region, or the table is compacted and StoreFiles are re-written, they will become "local" to the RegionServer.
+
+For more information, see _Replica Placement: The First Baby Steps_ on this page: [HDFS Architecture](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html) and also Lars George's blog on [HBase and HDFS locality](http://www.larsgeorge.com/2010/05/hbase-file-locality-in-hdfs.html).
+
+## Region Splits
+
+Regions split when they reach a configured threshold. Below we treat the topic in short. For a longer exposition, see [Apache HBase Region Splitting and Merging](http://hortonworks.com/blog/apache-hbase-region-splitting-and-merging/) by our Enis Soztutar.
+
+Splits run unaided on the RegionServer; i.e. the Master does not participate. The RegionServer splits a region, offlines the split region and then adds the daughter regions to `hbase:meta`, opens daughters on the parent's hosting RegionServer and then reports the split to the Master. See [Managed Splitting](/docs/configuration/important#managed-splitting) for how to manually manage splits (and for why you might do this).
+
+### Custom Split Policies
+
+You can override the default split policy using a custom [RegionSplitPolicy](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.html) (HBase 0.94+). Typically a custom split policy should extend HBase's default split policy: [IncreasingToUpperBoundRegionSplitPolicy](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.html).
+
+The policy can be set globally through the HBase configuration or on a per-table basis.
+
+#### Configuring the Split Policy Globally in _hbase-site.xml_
+
+```xml
+<property>
+  <name>hbase.regionserver.region.split.policy</name>
+  <value>org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy</value>
+</property>
+```
+
+#### Configuring a Split Policy On a Table Using the Java API
+
+```java
+HTableDescriptor tableDesc = new HTableDescriptor("test");
+tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
+tableDesc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf1")));
+admin.createTable(tableDesc);
+```
+
+#### Configuring the Split Policy On a Table Using HBase Shell
+
+```ruby
+hbase> create 'test', {METADATA => {'SPLIT_POLICY' => 'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy'}},{NAME => 'cf1'}
+```
+
+The policy can be set globally through the HBaseConfiguration used or on a per table basis:
+
+```java
+HTableDescriptor myHtd = ...;
+myHtd.setValue(HTableDescriptor.SPLIT_POLICY, MyCustomSplitPolicy.class.getName());
+```
+
+
+ The `DisabledRegionSplitPolicy` policy blocks manual region splitting.
+
+
+## Manual Region Splitting
+
+It is possible to manually split your table, either at table creation (pre-splitting), or at a later time as an administrative action. You might choose to split your region for one or more of the following reasons. There may be other valid reasons, but the need to manually split your table might also point to problems with your schema design.
+
+**Reasons to Manually Split Your Table:**
+
+- Your data is sorted by timeseries or another similar algorithm that sorts new data at the end of the table. This means that the Region Server holding the last region is always under load, and the other Region Servers are idle, or mostly idle. See also [Monotonically Increasing Row Keys/Timeseries Data](/docs/regionserver-sizing#monotonically-increasing-row-keystimeseries-data).
+- You have developed an unexpected hotspot in one region of your table. For instance, an application which tracks web searches might be inundated by a lot of searches for a celebrity in the event of news about that celebrity. See [perf.one.region](/docs/performance#anti-pattern-one-hot-region) for more discussion about this particular scenario.
+- After a big increase in the number of RegionServers in your cluster, to get the load spread out quickly.
+- Before a bulk-load which is likely to cause unusual and uneven load across regions.
+
+See [Managed Splitting](/docs/configuration/important#managed-splitting) for a discussion about the dangers and possible benefits of managing splitting completely manually.
+
+
+ The `DisabledRegionSplitPolicy` policy blocks manual region splitting.
+
+
+### Determining Split Points
+
+The goal of splitting your table manually is to improve the chances of balancing the load across the cluster in situations where good rowkey design alone won't get you there. Keeping that in mind, the way you split your regions is very dependent upon the characteristics of your data. It may be that you already know the best way to split your table. If not, the way you split your table depends on what your keys are like.
+
+**Alphanumeric Rowkeys**
+If your rowkeys start with a letter or number, you can split your table at letter or number boundaries. For instance, you could create a table with regions that split at each vowel, so the first region has A-D, the second region has E-H, the third region has I-N, the fourth region has O-T, and the fifth region has U-Z.
+
+**Using a Custom Algorithm**
+The RegionSplitter tool is provided with HBase, and uses a _SplitAlgorithm_ to determine split points for you. As parameters, you give it the algorithm, desired number of regions, and column families. It includes three split algorithms. The first is the [`HexStringSplit`](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html) algorithm, which assumes the row keys are hexadecimal strings. The second is the [`DecimalStringSplit`](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.DecimalStringSplit.html) algorithm, which assumes the row keys are decimal strings in the range 00000000 to 99999999. The third, [`UniformSplit`](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html), assumes the row keys are random byte arrays. You will probably need to develop your own [`SplitAlgorithm`](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html), using the provided ones as models.
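+
+As a hedged illustration, the split points produced by a `SplitAlgorithm` such as `HexStringSplit` can be passed straight to `Admin.createTable` when pre-splitting from Java; the table and family names below are placeholders.
+
+```java
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.util.RegionSplitter;
+
+public class PreSplitWithHexStringSplit {
+  public static void main(String[] args) throws Exception {
+    // Ask the algorithm for the boundaries of 10 evenly sized regions of hex row keys.
+    byte[][] splitPoints = new RegionSplitter.HexStringSplit().split(10);
+    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = connection.getAdmin()) {
+      admin.createTable(
+          TableDescriptorBuilder.newBuilder(TableName.valueOf("pre_split_table"))
+              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
+              .build(),
+          splitPoints);
+    }
+  }
+}
+```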
+
+## Online Region Merges
+
+Both Master and RegionServer participate in the event of online region merges. The client sends a merge RPC to the master, then the master moves the regions together to the RegionServer where the more heavily loaded region resided. Finally the master sends the merge request to this RegionServer which then runs the merge. Similar to the process of region splitting, region merges run as a local transaction on the RegionServer. It offlines the regions and then merges the two regions on the file system, atomically deletes the merging regions from `hbase:meta`, adds the merged region to `hbase:meta`, opens the merged region on the RegionServer and reports the merge to the Master.
+
+An example of region merges in the HBase shell
+
+```ruby
+$ hbase> merge_region 'ENCODED_REGIONNAME', 'ENCODED_REGIONNAME'
+$ hbase> merge_region 'ENCODED_REGIONNAME', 'ENCODED_REGIONNAME', true
+```
+
+It's an asynchronous operation and the call returns immediately without waiting for the merge to complete. Passing `true` as the optional third parameter will force a merge. Normally only adjacent regions can be merged. The `force` parameter overrides this behaviour and is for expert use only.
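+
+The merge can also be requested from Java. The sketch below uses `Admin.mergeRegionsAsync`; the merge-related methods have changed across releases, so check the `Admin` javadoc for your version. The encoded region names are placeholders.
+
+```java
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class MergeRegionsExample {
+  // Java counterpart of the shell merge_region call; look up the encoded region
+  // names in the Master UI or in hbase:meta.
+  static void merge(Admin admin) throws Exception {
+    Future<Void> future = admin.mergeRegionsAsync(
+        new byte[][] {
+            Bytes.toBytes("ENCODED_REGIONNAME_A"),
+            Bytes.toBytes("ENCODED_REGIONNAME_B")
+        },
+        false);   // pass true to force-merge non-adjacent regions (expert use only)
+    future.get(); // the request is asynchronous; block here only if you need completion
+  }
+}
+```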
+
+## Store
+
+A Store hosts a MemStore and 0 or more StoreFiles (HFiles). A Store corresponds to a column family for a table for a given region.
+
+### MemStore
+
+The MemStore holds in-memory modifications to the Store. Modifications are Cells/KeyValues. When a flush is requested, the current MemStore is moved to a snapshot and is cleared. HBase continues to serve edits from the new MemStore and backing snapshot until the flusher reports that the flush succeeded. At this point, the snapshot is discarded. Note that when the flush happens, MemStores that belong to the same region will all be flushed.
+
+### MemStore Flush
+
+A MemStore flush can be triggered under any of the conditions listed below. The minimum flush unit is per region, not at individual MemStore level.
+
+1. When a MemStore reaches the size specified by `hbase.hregion.memstore.flush.size`, all MemStores that belong to its region will be flushed out to disk.
+2. When the overall MemStore usage reaches the value specified by `hbase.regionserver.global.memstore.upperLimit`, MemStores from various regions will be flushed out to disk to reduce overall MemStore usage in a RegionServer.
+ The flush order is based on the descending order of a region's MemStore usage. Regions will have their MemStores flushed until the overall MemStore usage drops to or slightly below `hbase.regionserver.global.memstore.lowerLimit`.
+3. When the number of WAL log entries in a given region server's WAL reaches the value specified in `hbase.regionserver.max.logs`, MemStores from various regions will be flushed out to disk to reduce the number of logs in the WAL.
+ The flush order is based on time. Regions with the oldest MemStores are flushed first until WAL count drops below `hbase.regionserver.max.logs`.
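+
+Flushes can also be triggered manually, for example from the Java client. The following is a small, hedged sketch using `Admin#flush`, which asks every region of the table to flush its MemStores; the table name is hypothetical.
+
+```java
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+
+public class ManualFlushExample {
+  public static void main(String[] args) throws IOException {
+    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = conn.getAdmin()) {
+      // Flushes the MemStores of every region of the table to new StoreFiles.
+      admin.flush(TableName.valueOf("my_table"));
+    }
+  }
+}
+```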
+
+### Scans [#architecture-regions-store-scans]
+
+- When a client issues a scan against a table, HBase generates `RegionScanner` objects, one per region, to serve the scan request.
+- The `RegionScanner` object contains a list of `StoreScanner` objects, one per column family.
+- Each `StoreScanner` object further contains a list of `StoreFileScanner` objects, corresponding to each StoreFile and HFile of the corresponding column family, and a list of `KeyValueScanner` objects for the MemStore.
+- The two lists are merged into one, which is sorted in ascending order with the scan object for the MemStore at the end of the list.
+- When a `StoreFileScanner` object is constructed, it is associated with a `MultiVersionConcurrencyControl` read point, which is the current `memstoreTS`, filtering out any new updates beyond the read point.
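+
+From the client side, all of this machinery is hidden behind a plain `Scan`. The following minimal sketch (the table, family, and row-key names are hypothetical) issues a bounded scan; server side, HBase builds the `RegionScanner`, `StoreScanner`, and `StoreFileScanner` objects described above to serve it.
+
+```java
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class ScanExample {
+  public static void main(String[] args) throws IOException {
+    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Table table = conn.getTable(TableName.valueOf("my_table"))) {
+      Scan scan = new Scan()
+          .withStartRow(Bytes.toBytes("row-0000"))
+          .withStopRow(Bytes.toBytes("row-9999"))
+          .addFamily(Bytes.toBytes("cf")); // one StoreScanner per requested family, server side
+      try (ResultScanner scanner = table.getScanner(scan)) {
+        for (Result result : scanner) {
+          System.out.println(Bytes.toString(result.getRow()));
+        }
+      }
+    }
+  }
+}
+```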
+
+### StoreFile (HFile)
+
+StoreFiles are where your data lives.
+
+#### HFile Format
+
+The _HFile_ file format is based on the SSTable file described in the [BigTable \[2006](http://research.google.com/archive/bigtable.html)\] paper and on Hadoop's [TFile](https://hadoop.apache.org/docs/current/api/org/apache/hadoop/io/file/tfile/TFile.html) (The unit test suite and the compression harness were taken directly from TFile). Schubert Zhang's blog post on [HFile: A Block-Indexed File Format to Store Sorted Key-Value Pairs](http://cloudepr.blogspot.com/2009/09/hfile-block-indexed-file-format-to.html) makes for a thorough introduction to HBase's HFile. Matteo Bertozzi has also put up a helpful description, [HBase I/O: HFile](http://th30z.blogspot.com/2011/02/hbase-io-hfile.html?spref=tw).
+
+For more information, see the HFile source code. Also see [HBase file format with inline blocks (version 2)](/docs/hfile-format#hbase-file-format-with-inline-blocks-version-2) for information about the HFile v2 format that was included in 0.92.
+
+#### HFile Tool [#architecture-regions-store-hfile-tool]
+
+To view a textualized version of HFile content, you can use the `hbase hfile` tool. Type the following to see usage:
+
+```bash
+$ ${HBASE_HOME}/bin/hbase hfile
+```
+
+For example, to view the content of the file _hdfs://10.81.47.41:9000/hbase/default/TEST/1418428042/DSMP/4759508618286845475_, type the following:
+
+```bash
+$ ${HBASE_HOME}/bin/hbase hfile -v -f hdfs://10.81.47.41:9000/hbase/default/TEST/1418428042/DSMP/4759508618286845475
+```
+
+Leave off the `-v` option to see just a summary of the HFile. See the usage output for other things you can do with the `hfile` tool.
+
+
+ In the output of this tool, you might see 'seqid=0' for certain keys in places such as
+ 'Mid-key'/'firstKey'/'lastKey'. These are 'KeyOnlyKeyValue' instances, meaning their seqid
+ is irrelevant and we only need the keys of these Key-Value instances.
+
+
+#### StoreFile Directory Structure on HDFS
+
+For more information of what StoreFiles look like on HDFS with respect to the directory structure, see [Browsing HDFS for HBase Objects](/docs/troubleshooting#browsing-hdfs-for-hbase-objects).
+
+### Blocks
+
+StoreFiles are composed of blocks. The blocksize is configured on a per-ColumnFamily basis.
+
+Compression happens at the block level within StoreFiles. For more information on compression, see [Compression and Data Block Encoding In HBase](/docs/compression).
+
+For more information on blocks, see the HFileBlock source code.
+
+### KeyValue
+
+The KeyValue class is the heart of data storage in HBase. KeyValue wraps a byte array and takes offsets and lengths into the passed array which specify where to start interpreting the content as KeyValue.
+
+The KeyValue format inside a byte array is:
+
+- keylength
+- valuelength
+- key
+- value
+
+The Key is further decomposed as:
+
+- rowlength
+- row (i.e., the rowkey)
+- columnfamilylength
+- columnfamily
+- columnqualifier
+- timestamp
+- keytype (e.g., Put, Delete, DeleteColumn, DeleteFamily)
+
+KeyValue instances are _not_ split across blocks. For example, if there is an 8 MB KeyValue, even if the block-size is 64kb this KeyValue will be read in as a coherent block. For more information, see the KeyValue source code.
+
+#### Example [#architecture-regions-store-keyvalue-example]
+
+To emphasize the points above, examine what happens with two Puts for two different columns for the same row:
+
+- Put \#1: `rowkey=row1, cf:attr1=value1`
+- Put \#2: `rowkey=row1, cf:attr2=value2`
+
+Even though these are for the same row, a KeyValue is created for each column:
+
+Key portion for Put \#1:
+
+- `rowlength -----------→ 4`
+- `row -----------------→ row1`
+- `columnfamilylength --→ 2`
+- `columnfamily --------→ cf`
+- `columnqualifier -----→ attr1`
+- `timestamp -----------→ server time of Put`
+- `keytype -------------→ Put`
+
+Key portion for Put \#2:
+
+- `rowlength -----------→ 4`
+- `row -----------------→ row1`
+- `columnfamilylength --→ 2`
+- `columnfamily --------→ cf`
+- `columnqualifier -----→ attr2`
+- `timestamp -----------→ server time of Put`
+- `keytype -------------→ Put`
+
+It is critical to understand that the rowkey, ColumnFamily, and column (aka columnqualifier) are embedded within the KeyValue instance. The longer these identifiers are, the bigger the KeyValue is.
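+
+As an illustrative sketch, the same structure can be observed programmatically by constructing a `KeyValue` for Put \#1 and printing the lengths of its components (the expected values are shown in comments):
+
+```java
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class KeyValueExample {
+  public static void main(String[] args) {
+    // Equivalent of Put #1: rowkey=row1, cf:attr1=value1
+    KeyValue kv = new KeyValue(
+        Bytes.toBytes("row1"), Bytes.toBytes("cf"), Bytes.toBytes("attr1"),
+        System.currentTimeMillis(), Bytes.toBytes("value1"));
+
+    System.out.println("row length:       " + kv.getRowLength());       // 4
+    System.out.println("family length:    " + kv.getFamilyLength());    // 2
+    System.out.println("qualifier length: " + kv.getQualifierLength()); // 5
+    System.out.println("value length:     " + kv.getValueLength());     // 6
+    // The key length covers row, family, qualifier, timestamp and type, plus their length fields.
+    System.out.println("key length:       " + kv.getKeyLength());
+    System.out.println("timestamp:        " + kv.getTimestamp());
+  }
+}
+```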
+
+### Compaction
+
+**Ambiguous Terminology:**
+
+- A _StoreFile_ is a facade of HFile. In terms of compaction, use of StoreFile seems to have prevailed in the past.
+- A _Store_ is the same thing as a ColumnFamily. StoreFiles are related to a Store, or ColumnFamily.
+- If you want to read more about StoreFiles versus HFiles and Stores versus ColumnFamilies, see [HBASE-11316](https://issues.apache.org/jira/browse/HBASE-11316).
+
+When the MemStore reaches a given size (`hbase.hregion.memstore.flush.size`), it flushes its contents to a StoreFile. The number of StoreFiles in a Store increases over time. _Compaction_ is an operation which reduces the number of StoreFiles in a Store, by merging them together, in order to increase performance on read operations. Compactions can be resource-intensive to perform, and can either help or hinder performance depending on many factors.
+
+Compactions fall into two categories: minor and major. Minor and major compactions differ in the following ways.
+
+_Minor compactions_ usually select a small number of small, adjacent StoreFiles and rewrite them as a single StoreFile. Minor compactions do not drop (filter out) deletes or expired versions, because of potential side effects. See [Compaction and Deletions](/docs/architecture/regions#compaction-and-deletions) and [Compaction and Versions](/docs/architecture/regions#compaction-and-versions) for information on how deletes and versions are handled in relation to compactions. The end result of a minor compaction is fewer, larger StoreFiles for a given Store.
+
+The end result of a _major compaction_ is a single StoreFile per Store. Major compactions also process delete markers and max versions. See [Compaction and Deletions](/docs/architecture/regions#compaction-and-deletions) and [Compaction and Versions](/docs/architecture/regions#compaction-and-versions) for information on how deletes and versions are handled in relation to compactions.
+
+#### Compaction and Deletions
+
+When an explicit deletion occurs in HBase, the data is not actually deleted. Instead, a _tombstone_ marker is written. The tombstone marker prevents the data from being returned with queries. During a major compaction, the data is actually deleted, and the tombstone marker is removed from the StoreFile. If the deletion happens because of an expired TTL, no tombstone is created. Instead, the expired data is filtered out and is not written back to the compacted StoreFile.
+
+#### Compaction and Versions
+
+When you create a Column Family, you can specify the maximum number of versions to keep, by specifying `ColumnFamilyDescriptorBuilder.setMaxVersions(int versions)`. The default value is `1`. If more versions than the specified maximum exist, the excess versions are filtered out and not written back to the compacted StoreFile.
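+
+For example, here is a hedged sketch of creating a table whose column family keeps up to three versions; the table and family names are hypothetical.
+
+```java
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class MaxVersionsExample {
+  public static void main(String[] args) throws IOException {
+    // Keep up to 3 versions per cell; excess versions are filtered out during compaction.
+    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
+        .newBuilder(Bytes.toBytes("cf"))
+        .setMaxVersions(3)
+        .build();
+    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = conn.getAdmin()) {
+      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("versioned_table"))
+          .setColumnFamily(cf)
+          .build());
+    }
+  }
+}
+```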
+
+
+ In some situations, older versions can be inadvertently resurrected if a newer version is
+ explicitly deleted. See [Major compactions change query
+ results](/docs/datamodel#major-compactions-change-query-results) for a more in-depth explanation.
+ This situation is only possible before the compaction finishes.
+
+
+In theory, major compactions improve performance. However, on a highly loaded system, major compactions can require an inappropriate number of resources and adversely affect performance. In a default configuration, major compactions are scheduled automatically to run once in a 7-day period. This is sometimes inappropriate for systems in production. You can manage major compactions manually. See [Managed Compactions](/docs/configuration/important#managed-compactions).
+
+Compactions do not perform region merges. See [Merge](/docs/operational-management/region-and-capacity#merge) for more information on region merging.
+
+#### Compaction Switch
+
+You can switch compactions on and off at the RegionServers. Switching off compactions will also interrupt any currently ongoing compactions. This can be done dynamically using the `compaction_switch` command from the HBase shell. If done from the command line, the setting will be lost when the server restarts. To persist the change across RegionServers, modify the configuration `hbase.regionserver.compaction.enabled` in _hbase-site.xml_ and restart HBase.
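+
+The same switch can be flipped from the Java client. The sketch below assumes the `Admin#compactionSwitch` method available in recent HBase releases, which (as assumed here) returns the previous on/off state for each RegionServer; an empty server list targets every RegionServer.
+
+```java
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+
+public class CompactionSwitchExample {
+  public static void main(String[] args) throws IOException {
+    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = conn.getAdmin()) {
+      // Switch compactions off on every RegionServer (empty list = all servers).
+      Map<ServerName, Boolean> previousState =
+          admin.compactionSwitch(false, Collections.emptyList());
+      previousState.forEach((server, wasEnabled) ->
+          System.out.println(server + " compactions previously enabled: " + wasEnabled));
+    }
+  }
+}
+```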
+
+#### Compaction Policy - HBase 0.96.x and newer
+
+Compacting large StoreFiles, or too many StoreFiles at once, can cause more IO load than your cluster is able to handle without causing performance problems. The method by which HBase selects which StoreFiles to include in a compaction (and whether the compaction is a minor or major compaction) is called the _compaction policy_.
+
+Prior to HBase 0.96.x, there was only one compaction policy. That original compaction policy is still available as `RatioBasedCompactionPolicy`. The new default compaction policy, called `ExploringCompactionPolicy`, was subsequently backported to HBase 0.94 and HBase 0.95, and is the default in HBase 0.96 and newer. It was implemented in [HBASE-7842](https://issues.apache.org/jira/browse/HBASE-7842). In short, `ExploringCompactionPolicy` attempts to select the best possible set of StoreFiles to compact with the least amount of work, while the `RatioBasedCompactionPolicy` selects the first set that meets the criteria.
+
+Regardless of the compaction policy used, file selection is controlled by several configurable parameters and happens in a multi-step approach. These parameters will be explained in context, and then will be given in a table which shows their descriptions, defaults, and implications of changing them.
+
+#### Being Stuck
+
+When the MemStore gets too large, it needs to flush its contents to a StoreFile. However, Stores are configured with a bound on the number of StoreFiles, `hbase.hstore.blockingStoreFiles`, and if this is exceeded, the MemStore flush must wait until the StoreFile count is reduced by one or more compactions. If the MemStore is too large and the number of StoreFiles is also too high, the algorithm is said to be "stuck". By default, we wait on compactions for up to `hbase.hstore.blockingWaitTime` milliseconds. If this period expires, we flush anyway, even though we are in excess of the `hbase.hstore.blockingStoreFiles` count.
+
+Raising the `hbase.hstore.blockingStoreFiles` count will allow flushes to happen, but a Store with many StoreFiles in it will likely have higher read latencies. Try to figure out why compactions are not keeping up. Is it a write spurt that is bringing about this situation, or is it a regular occurrence and the cluster is under-provisioned for the volume of writes?
+
+#### The ExploringCompactionPolicy Algorithm
+
+The ExploringCompactionPolicy algorithm considers each possible set of adjacent StoreFiles before choosing the set where compaction will have the most benefit.
+
+One situation where the ExploringCompactionPolicy works especially well is when you are bulk-loading data and the bulk loads create larger StoreFiles than the StoreFiles which are holding data older than the bulk-loaded data. This can "trick" HBase into choosing to perform a major compaction each time a compaction is needed, and cause a lot of extra overhead. With the ExploringCompactionPolicy, major compactions happen much less frequently because minor compactions are more efficient.
+
+In general, ExploringCompactionPolicy is the right choice for most situations, and thus is the default compaction policy. You can also use ExploringCompactionPolicy along with [Experimental: Stripe Compactions](/docs/architecture/regions#experimental-stripe-compactions).
+
+The logic of this policy can be examined in hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java. The following is a walk-through of the logic of the ExploringCompactionPolicy.
+
+1. Make a list of all existing StoreFiles in the Store. The rest of the algorithm filters this list to come up with the subset of HFiles which will be chosen for compaction.
+2. If this was a user-requested compaction, attempt to perform the requested compaction type, regardless of what would normally be chosen. Note that even if the user requests a major compaction, it may not be possible to perform a major compaction. This may be because not all StoreFiles in the Column Family are available to compact or because there are too many StoreFiles in the Store.
+3. Some StoreFiles are automatically excluded from consideration. These include:
+ - StoreFiles that are larger than `hbase.hstore.compaction.max.size`
+ - StoreFiles that were created by a bulk-load operation which explicitly excluded compaction. You may decide to exclude StoreFiles resulting from bulk loads, from compaction. To do this, specify the `hbase.mapreduce.hfileoutputformat.compaction.exclude` parameter during the bulk load operation.
+4. Iterate through the list from step 1, and make a list of all potential sets of StoreFiles to compact together. A potential set is a grouping of `hbase.hstore.compaction.min` contiguous StoreFiles in the list. For each set, perform some sanity-checking and figure out whether this is the best compaction that could be done:
+ - If the number of StoreFiles in this set (not the size of the StoreFiles) is fewer than `hbase.hstore.compaction.min` or more than `hbase.hstore.compaction.max`, take it out of consideration.
+ - Compare the size of this set of StoreFiles with the size of the smallest possible compaction that has been found in the list so far. If the size of this set of StoreFiles represents the smallest compaction that could be done, store it to be used as a fall-back if the algorithm is "stuck" and no StoreFiles would otherwise be chosen. See [Being Stuck](/docs/architecture/regions#being-stuck).
+ - Do size-based sanity checks against each StoreFile in this set of StoreFiles.
+ - If the size of this StoreFile is larger than `hbase.hstore.compaction.max.size`, take it out of consideration.
+ - If the size is greater than or equal to `hbase.hstore.compaction.min.size`, sanity-check it against the file-based ratio to see whether it is too large to be considered.
+
+ The sanity-checking is successful if:
+ - There is only one StoreFile in this set, or
+ - For each StoreFile, its size multiplied by `hbase.hstore.compaction.ratio` (or `hbase.hstore.compaction.ratio.offpeak` if off-peak hours are configured and it is during off-peak hours) is less than the sum of the sizes of the other HFiles in the set.
+
+5. If this set of StoreFiles is still in consideration, compare it to the previously-selected best compaction. If it is better, replace the previously-selected best compaction with this one.
+6. When the entire list of potential compactions has been processed, perform the best compaction that was found. If no StoreFiles were selected for compaction, but there are multiple StoreFiles, assume the algorithm is stuck (see [Being Stuck](/docs/architecture/regions#being-stuck)) and, if so, perform the smallest compaction that was stored as a fall-back in step 4.
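+
+To make the walk-through above concrete, here is a much-simplified, illustrative sketch of the selection loop in plain Java. It follows the rules as described in this section (window bounds, the max-size exclusion, and the ratio sanity check); the real policy in ExploringCompactionPolicy.java handles many more cases, and "better" is interpreted here as "more files, then smaller total size", which is one reasonable reading.
+
+```java
+import java.util.Arrays;
+
+/** A much-simplified sketch of the ExploringCompactionPolicy selection rules described above. */
+public class ExploringSelectionSketch {
+
+  // Returns the chosen window [start, end) of contiguous StoreFiles, or null if none qualifies.
+  static int[] select(long[] sizes, int minFiles, int maxFiles, long maxFileSize, double ratio) {
+    int[] best = null;
+    long bestTotal = 0;
+    for (int start = 0; start < sizes.length; start++) {
+      for (int end = start + minFiles; end <= Math.min(sizes.length, start + maxFiles); end++) {
+        if (!passesChecks(sizes, start, end, maxFileSize, ratio)) {
+          continue;
+        }
+        long total = sum(sizes, start, end);
+        int count = end - start;
+        // "Better" = more files, then smaller total size (one reasonable interpretation).
+        if (best == null || count > best[1] - best[0]
+            || (count == best[1] - best[0] && total < bestTotal)) {
+          best = new int[] { start, end };
+          bestTotal = total;
+        }
+      }
+    }
+    return best;
+  }
+
+  static boolean passesChecks(long[] sizes, int start, int end, long maxFileSize, double ratio) {
+    long total = sum(sizes, start, end);
+    for (int i = start; i < end; i++) {
+      if (sizes[i] > maxFileSize) {
+        return false; // larger than hbase.hstore.compaction.max.size
+      }
+      // Ratio sanity check: each file's size * ratio must be less than the sum of the others.
+      if (end - start > 1 && sizes[i] * ratio >= total - sizes[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  static long sum(long[] sizes, int start, int end) {
+    long total = 0;
+    for (int i = start; i < end; i++) {
+      total += sizes[i];
+    }
+    return total;
+  }
+
+  public static void main(String[] args) {
+    long[] sizes = { 100, 50, 23, 12, 12 }; // oldest to newest, in bytes
+    // Expected to choose the window covering 23, 12, 12 (indices [2, 5)).
+    System.out.println(Arrays.toString(select(sizes, 3, 5, 1000, 1.0)));
+  }
+}
+```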
+
+#### RatioBasedCompactionPolicy Algorithm
+
+The RatioBasedCompactionPolicy was the only compaction policy prior to HBase 0.96, though ExploringCompactionPolicy has now been backported to HBase 0.94 and 0.95. To use the RatioBasedCompactionPolicy rather than the ExploringCompactionPolicy, set `hbase.hstore.defaultengine.compactionpolicy.class` to `RatioBasedCompactionPolicy` in the _hbase-site.xml_ file. To switch back to the ExploringCompactionPolicy, remove the setting from the _hbase-site.xml_.
+
+The following section walks you through the algorithm used to select StoreFiles for compaction in the RatioBasedCompactionPolicy.
+
+1. The first phase is to create a list of all candidates for compaction. A list is created of all StoreFiles not already in the compaction queue, and all StoreFiles newer than the newest file that is currently being compacted. This list of StoreFiles is ordered by the sequence ID. The sequence ID is generated when a Put is appended to the write-ahead log (WAL), and is stored in the metadata of the HFile.
+2. Check to see if the algorithm is stuck (see [Being Stuck](/docs/architecture/regions#being-stuck), and if so, a major compaction is forced. This is a key area where [The ExploringCompactionPolicy Algorithm](/docs/architecture/regions#the-exploringcompactionpolicy-algorithm) is often a better choice than the RatioBasedCompactionPolicy.
+3. If the compaction was user-requested, try to perform the type of compaction that was requested. Note that a major compaction may not be possible if all HFiles are not available for compaction or if too many StoreFiles exist (more than `hbase.hstore.compaction.max`).
+4. Some StoreFiles are automatically excluded from consideration. These include:
+ - StoreFiles that are larger than `hbase.hstore.compaction.max.size`
+ - StoreFiles that were created by a bulk-load operation which explicitly excluded compaction. You may decide to exclude StoreFiles resulting from bulk loads, from compaction. To do this, specify the `hbase.mapreduce.hfileoutputformat.compaction.exclude` parameter during the bulk load operation.
+5. The maximum number of StoreFiles allowed in a major compaction is controlled by the `hbase.hstore.compaction.max` parameter. If the list contains more than this number of StoreFiles, a minor compaction is performed even if a major compaction would otherwise have been done. However, a user-requested major compaction still occurs even if there are more than `hbase.hstore.compaction.max` StoreFiles to compact.
+6. If the list contains fewer than `hbase.hstore.compaction.min` StoreFiles to compact, a minor compaction is aborted. Note that a major compaction can be performed on a single HFile. Its function is to remove deletes and expired versions, and reset locality on the StoreFile.
+7. The value of the `hbase.hstore.compaction.ratio` parameter is multiplied by the sum of StoreFiles smaller than a given file, to determine whether that StoreFile is selected for compaction during a minor compaction. For instance, if `hbase.hstore.compaction.ratio` is 1.2, FileX is 5MB, FileY is 2MB, and FileZ is 3MB:
+ ```
+ 5 <= 1.2 x (2 + 3) or 5 <= 6
+ ```
+ In this scenario, FileX is eligible for minor compaction. If FileX were 7MB, it would not be eligible for minor compaction. This ratio favors smaller StoreFiles. You can configure a different ratio for use in off-peak hours, using the parameter `hbase.hstore.compaction.ratio.offpeak`, if you also configure `hbase.offpeak.start.hour` and `hbase.offpeak.end.hour`.
+8. If the last major compaction was too long ago and there is more than one StoreFile to be compacted, a major compaction is run, even if it would otherwise have been minor. By default, the maximum time between major compactions is 7 days, plus or minus a 4.8 hour period, and determined randomly within those parameters. Prior to HBase 0.96, the major compaction period was 24 hours. See `hbase.hregion.majorcompaction` in the table below to tune or disable time-based major compactions.
+
+#### Parameters Used by Compaction Algorithm
+
+This table contains the main configuration parameters for compaction. This list is not exhaustive. To tune these parameters from the defaults, edit the _hbase-site.xml_ file. For a full list of all configuration parameters available, see [config.files](/docs/configuration/default#configuration-default-hbase-default-configuration).
+
+- `hbase.hstore.compaction.min`
+ The minimum number of StoreFiles which must be eligible for compaction before compaction can run. The goal of tuning `hbase.hstore.compaction.min` is to avoid ending up with too many tiny StoreFiles to compact. Setting this value to 2 would cause a minor compaction each time you have two StoreFiles in a Store, and this is probably not appropriate. If you set this value too high, all the other values will need to be adjusted accordingly. For most cases, the default value is appropriate. In previous versions of HBase, the parameter `hbase.hstore.compaction.min` was called `hbase.hstore.compactionThreshold`.
+ **Default**: 3
+
+- `hbase.hstore.compaction.max`
+ The maximum number of StoreFiles which will be selected for a single minor compaction, regardless of the number of eligible StoreFiles. Effectively, the value of `hbase.hstore.compaction.max` controls the length of time it takes a single compaction to complete. Setting it larger means that more StoreFiles are included in a compaction. For most cases, the default value is appropriate.
+ **Default**: 10
+
+- `hbase.hstore.compaction.min.size`
+ A StoreFile smaller than this size will always be eligible for minor compaction. StoreFiles this size or larger are evaluated by `hbase.hstore.compaction.ratio` to determine if they are eligible. Because this limit represents the "automatic include" limit for all StoreFiles smaller than this value, this value may need to be reduced in write-heavy environments where many files in the 1-2 MB range are being flushed, because every StoreFile will be targeted for compaction and the resulting StoreFiles may still be under the minimum size and require further compaction. If this parameter is lowered, the ratio check is triggered more quickly. This addressed some issues seen in earlier versions of HBase but changing this parameter is no longer necessary in most situations.
+ **Default**: 128 MB
+
+- `hbase.hstore.compaction.max.size`
+ A StoreFile larger than this size will be excluded from compaction. The effect of raising `hbase.hstore.compaction.max.size` is fewer, larger StoreFiles that do not get compacted often. If you feel that compaction is happening too often without much benefit, you can try raising this value.
+ **Default**: `Long.MAX_VALUE`
+
+- `hbase.hstore.compaction.ratio`
+ For minor compaction, this ratio is used to determine whether a given StoreFile which is larger than `hbase.hstore.compaction.min.size` is eligible for compaction. Its effect is to limit compaction of large StoreFiles. The value of `hbase.hstore.compaction.ratio` is expressed as a floating-point decimal.
+ - A large ratio, such as 10, will produce a single giant StoreFile. Conversely, a value of .25 will produce behavior similar to the BigTable compaction algorithm, producing four StoreFiles.
+ - A moderate value of between 1.0 and 1.4 is recommended. When tuning this value, you are balancing write costs with read costs. Raising the value (to something like 1.4) will have more write costs, because you will compact larger StoreFiles. However, during reads, HBase will need to seek through fewer StoreFiles to accomplish the read. Consider this approach if you cannot take advantage of [Bloom Filters](/docs/performance#bloom-filters).
+ - Alternatively, you can lower this value to something like 1.0 to reduce the background cost of writes, and rely on [Bloom Filters](/docs/performance#bloom-filters) to limit the number of StoreFiles touched during reads. For most cases, the default value is appropriate.
+ **Default**: `1.2F`
+
+- `hbase.hstore.compaction.ratio.offpeak`
+ The compaction ratio used during off-peak compactions, if off-peak hours are also configured (see below). Expressed as a floating-point decimal. This allows for more aggressive (or less aggressive, if you set it lower than `hbase.hstore.compaction.ratio`) compaction during a set time period. Ignored if off-peak is disabled (default). This works the same as `hbase.hstore.compaction.ratio`.
+ **Default**: `5.0F`
+
+- `hbase.offpeak.start.hour`
+ The start of off-peak hours, expressed as an integer between 0 and 23, inclusive. Set to -1 to disable off-peak.
+ **Default**: `-1` (disabled)
+
+- `hbase.offpeak.end.hour`
+ The end of off-peak hours, expressed as an integer between 0 and 23, inclusive. Set to -1 to disable off-peak.
+ **Default**: `-1` (disabled)
+
+- `hbase.regionserver.thread.compaction.throttle`
+ There are two different thread pools for compactions, one for large compactions and the other for small compactions. This helps to keep compaction of lean tables (such as `hbase:meta`) fast. If a compaction is larger than this threshold, it goes into the large compaction pool. In most cases, the default value is appropriate.
+ **Default**: `2 x hbase.hstore.compaction.max x hbase.hregion.memstore.flush.size` (where `hbase.hregion.memstore.flush.size` defaults to 128 MB)
+
+- `hbase.hregion.majorcompaction`
+ Time between major compactions, expressed in milliseconds. Set to 0 to disable time-based automatic major compactions. User-requested and size-based major compactions will still run. This value is multiplied by `hbase.hregion.majorcompaction.jitter` to cause compaction to start at a somewhat-random time during a given window of time.
+ **Default**: 7 days (`604800000` milliseconds)
+
+- `hbase.hregion.majorcompaction.jitter`
+ A multiplier applied to `hbase.hregion.majorcompaction` to cause compaction to occur a given amount of time either side of `hbase.hregion.majorcompaction`. The smaller the number, the closer the compactions will happen to the `hbase.hregion.majorcompaction` interval. Expressed as a floating-point decimal.
+ **Default**: `.50F`
+
+#### Compaction File Selection
+
+
+ This section has been preserved for historical reasons and refers to the way compaction worked
+ prior to HBase 0.96.x. You can still use this behavior if you enable [RatioBasedCompactionPolicy
+ Algorithm](/docs/architecture/regions#ratiobasedcompactionpolicy-algorithm). For information on
+ the way that compactions work in HBase 0.96.x and later, see
+ [Compaction](/docs/architecture/regions#compaction).
+
+
+To understand the core algorithm for StoreFile selection, there is some ASCII-art in the Store source code that serves as a useful reference.
+
+It has been copied below:
+
+```java
+/* normal skew:
+ *
+ * older ----> newer
+ * _
+ * | | _
+ * | | | | _
+ * --|-|- |-|- |-|---_-------_------- minCompactSize
+ * | | | | | | | | _ | |
+ * | | | | | | | | | | | |
+ * | | | | | | | | | | | |
+ */
+```
+
+**Important knobs:**
+
+- `hbase.hstore.compaction.ratio` Ratio used in compaction file selection algorithm (default 1.2f).
+- `hbase.hstore.compaction.min` (in HBase v 0.90 this is called `hbase.hstore.compactionThreshold`) (files) Minimum number of StoreFiles per Store to be selected for a compaction to occur (default 2).
+- `hbase.hstore.compaction.max` (files) Maximum number of StoreFiles to compact per minor compaction (default 10).
+- `hbase.hstore.compaction.min.size` (bytes) Any StoreFile smaller than this setting will automatically be a candidate for compaction. Defaults to `hbase.hregion.memstore.flush.size` (128 MB).
+- `hbase.hstore.compaction.max.size` (.92) (bytes) Any StoreFile larger than this setting will automatically be excluded from compaction (default Long.MAX_VALUE).
+
+The minor compaction StoreFile selection logic is size-based, and selects a file for compaction when `file <= sum(smaller_files) * hbase.hstore.compaction.ratio`.
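+
+As an illustrative sketch, the selection logic described above can be expressed in a few lines of plain Java; run against the three examples that follow, it reproduces their outcomes. This is a simplification for teaching purposes, not the actual Store code.
+
+```java
+import java.util.ArrayList;
+import java.util.List;
+
+/** A simplified sketch of the pre-0.96 size-based minor compaction file selection. */
+public class RatioBasedSelectionSketch {
+
+  // sizes are ordered oldest to newest; returns the selected file sizes, oldest first.
+  static List<Long> select(long[] sizes, int minFiles, int maxFiles,
+                           long minSize, long maxSize, double ratio) {
+    List<Long> selected = new ArrayList<>();
+    for (int i = 0; i < sizes.length && selected.size() < maxFiles; i++) {
+      long sumNewer = 0;
+      for (int j = i + 1; j < sizes.length; j++) {
+        sumNewer += sizes[j];
+      }
+      boolean eligible = sizes[i] <= maxSize
+          && (sizes[i] < minSize               // below min.size: automatically a candidate
+              || sizes[i] <= sumNewer * ratio  // file <= sum(smaller, newer files) * ratio
+              || !selected.isEmpty());         // a previous (older) file was already included
+      if (eligible) {
+        selected.add(sizes[i]);
+      }
+    }
+    // Fewer than the minimum number of files: no compaction.
+    return selected.size() >= minFiles ? selected : new ArrayList<>();
+  }
+
+  public static void main(String[] args) {
+    // Example #1: prints [23, 12, 12]
+    System.out.println(select(new long[] { 100, 50, 23, 12, 12 }, 3, 5, 10, 1000, 1.0));
+    // Example #2: prints [] (not enough files to compact)
+    System.out.println(select(new long[] { 100, 25, 12, 12 }, 3, 5, 10, 1000, 1.0));
+    // Example #3: prints [7, 6, 5, 4, 3]
+    System.out.println(select(new long[] { 7, 6, 5, 4, 3, 2, 1 }, 3, 5, 10, 1000, 1.0));
+  }
+}
+```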
+
+#### Minor Compaction File Selection - Example \#1 (Basic Example)
+
+This example mirrors an example from the unit test `TestCompactSelection`.
+
+- `hbase.hstore.compaction.ratio` = 1.0f
+- `hbase.hstore.compaction.min` = 3 (files)
+- `hbase.hstore.compaction.max` = 5 (files)
+- `hbase.hstore.compaction.min.size` = 10 (bytes)
+- `hbase.hstore.compaction.max.size` = 1000 (bytes)
+
+The following StoreFiles exist: 100, 50, 23, 12, and 12 bytes apiece (oldest to newest). With the above parameters, the files that would be selected for minor compaction are 23, 12, and 12.
+
+Why?
+
+- 100 → No, because sum(50, 23, 12, 12) \* 1.0 = 97.
+- 50 → No, because sum(23, 12, 12) \* 1.0 = 47.
+- 23 → Yes, because sum(12, 12) \* 1.0 = 24.
+- 12 → Yes, because the previous file has been included, and because this does not exceed the max-file limit of 5.
+- 12 → Yes, because the previous file had been included, and because this does not exceed the max-file limit of 5.
+
+#### Minor Compaction File Selection - Example \#2 (Not Enough Files To Compact)
+
+This example mirrors an example from the unit test `TestCompactSelection`.
+
+- `hbase.hstore.compaction.ratio` = 1.0f
+- `hbase.hstore.compaction.min` = 3 (files)
+- `hbase.hstore.compaction.max` = 5 (files)
+- `hbase.hstore.compaction.min.size` = 10 (bytes)
+- `hbase.hstore.compaction.max.size` = 1000 (bytes)
+
+The following StoreFiles exist: 100, 25, 12, and 12 bytes apiece (oldest to newest). With the above parameters, no compaction will be started.
+
+Why?
+
+- 100 → No, because sum(25, 12, 12) \* 1.0 = 47
+- 25 → No, because sum(12, 12) \* 1.0 = 24
+- 12 → No. It is a candidate because sum(12) \* 1.0 = 12, but there are only 2 files to compact, which is less than the threshold of 3.
+- 12 → No. It is a candidate because the previous StoreFile was, but there are not enough files to compact.
+
+#### Minor Compaction File Selection - Example \#3 (Limiting Files To Compact)
+
+This example mirrors an example from the unit test `TestCompactSelection`.
+
+- `hbase.hstore.compaction.ratio` = 1.0f
+- `hbase.hstore.compaction.min` = 3 (files)
+- `hbase.hstore.compaction.max` = 5 (files)
+- `hbase.hstore.compaction.min.size` = 10 (bytes)
+- `hbase.hstore.compaction.max.size` = 1000 (bytes)
+
+The following StoreFiles exist: 7, 6, 5, 4, 3, 2, and 1 bytes apiece (oldest to newest). With the above parameters, the files that would be selected for minor compaction are 7, 6, 5, 4, 3.
+
+Why?
+
+- 7 → Yes, because sum(6, 5, 4, 3, 2, 1) \* 1.0 = 21. Also, 7 is less than the min-size
+- 6 → Yes, because sum(5, 4, 3, 2, 1) \* 1.0 = 15. Also, 6 is less than the min-size.
+- 5 → Yes, because sum(4, 3, 2, 1) \* 1.0 = 10. Also, 5 is less than the min-size.
+- 4 → Yes, because sum(3, 2, 1) \* 1.0 = 6. Also, 4 is less than the min-size.
+- 3 → Yes, because sum(2, 1) \* 1.0 = 3. Also, 3 is less than the min-size.
+- 2 → No. Candidate because previous file was selected and 2 is less than the min-size, but the max-number of files to compact has been reached.
+- 1 → No. Candidate because previous file was selected and 1 is less than the min-size, but max-number of files to compact has been reached.
+
+
+ This information is now included in the configuration parameter table in [Parameters Used by
+ Compaction Algorithm](/docs/architecture/regions#parameters-used-by-compaction-algorithm).
+
+
+#### Date Tiered Compaction
+
+Date tiered compaction is a date-aware store file compaction strategy that is beneficial for time-range scans for time-series data.
+
+#### When To Use Date Tiered Compactions
+
+Consider using Date Tiered Compaction for reads over limited time ranges, especially scans of recent data.
+
+Don't use it for:
+
+- random gets without a limited time range
+- frequent deletes and updates
+- frequent out-of-order data writes creating long tails, especially writes with future timestamps
+- frequent bulk loads with heavily overlapping time ranges
+
+**Performance Improvements**
+Performance testing has shown that the performance of time-range scans improves greatly for limited time ranges, especially scans of recent data.
+
+#### Enabling Date Tiered Compaction
+
+You can enable Date Tiered compaction for a table or a column family, by setting its `hbase.hstore.engine.class` to `org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine`.
+
+You also need to set `hbase.hstore.blockingStoreFiles` to a high number, such as 60, if using all default settings, rather than the default value of 12. If you change the parameters, use 1.5 to 2 times the projected file count, where projected file count = windows per tier x tier count + incoming window min + files older than max age.
+
+You also need to set `hbase.hstore.compaction.max` to the same value as `hbase.hstore.blockingStoreFiles` to unblock major compaction.
+
+**Procedure: Enable Date Tiered Compaction**
+
+
+
+
+
+Run one of following commands in the HBase shell. Replace the table name `orders_table` with the name of your table.
+
+```ruby
+alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', 'hbase.hstore.blockingStoreFiles' => '60', 'hbase.hstore.compaction.min'=>'2', 'hbase.hstore.compaction.max'=>'60'}
+alter 'orders_table', {NAME => 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', 'hbase.hstore.blockingStoreFiles' => '60', 'hbase.hstore.compaction.min'=>'2', 'hbase.hstore.compaction.max'=>'60'}}
+create 'orders_table', 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine', 'hbase.hstore.blockingStoreFiles' => '60', 'hbase.hstore.compaction.min'=>'2', 'hbase.hstore.compaction.max'=>'60'}
+```
+
+
+
+
+
+Configure other options if needed. See [Configuring Date Tiered Compaction](/docs/architecture/regions#configuring-date-tiered-compaction) for more information.
+
+
+
+
+
+**Procedure: Disable Date Tiered Compaction**
+
+
+
+
+
+Set the `hbase.hstore.engine.class` option to either nil or `org.apache.hadoop.hbase.regionserver.DefaultStoreEngine`. Either option has the same effect. Make sure you also set the other options you changed back to their original values.
+
+```ruby
+alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DefaultStoreEngine', 'hbase.hstore.blockingStoreFiles' => '12', 'hbase.hstore.compaction.min'=>'6', 'hbase.hstore.compaction.max'=>'12'}
+```
+
+
+
+
+
+When you change the store engine either way, a major compaction will likely be performed on most regions. This is not necessary on new tables.
+
+#### Configuring Date Tiered Compaction
+
+Each of the settings for date tiered compaction should be configured at the table or column family level. If you use HBase shell, the general command pattern is as follows:
+
+```ruby
+alter 'orders_table', CONFIGURATION => {'key' => 'value', ..., 'key' => 'value'}
+```
+
+**Data Tier Parameters**
+You can configure your date tiers by changing the settings for the following parameters:
+
+| Setting | Notes |
+| -------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `hbase.hstore.compaction.date.tiered.max.storefile.age.millis` | Files with max-timestamp smaller than this will no longer be compacted. Default at Long.MAX_VALUE. |
+| `hbase.hstore.compaction.date.tiered.base.window.millis` | Base window size in milliseconds. Default at 6 hours. |
+| `hbase.hstore.compaction.date.tiered.windows.per.tier` | Number of windows per tier. Default at 4. |
+| `hbase.hstore.compaction.date.tiered.incoming.window.min` | Minimal number of files to compact in the incoming window. Set it to expected number of files in the window to avoid wasteful compaction. Default at 6. |
+| `hbase.hstore.compaction.date.tiered.window.policy.class` | The policy to select store files within the same time window. It doesn't apply to the incoming window. Default at exploring compaction. This is to avoid wasteful compaction. |
+
+**Compaction Throttler**
+With tiered compaction, all servers in the cluster will promote windows to higher tiers at the same time, so using a compaction throttle is recommended: set `hbase.regionserver.throughput.controller` to `org.apache.hadoop.hbase.regionserver.compactions.PressureAwareCompactionThroughputController`.
+
+
+ For more information about date tiered compaction, please refer to the design specification at
+ [https://docs.google.com/document/d/1_AmlNb2N8Us1xICsTeGDLKIqL6T-oHoRLZ323MG_uy8](https://docs.google.com/document/d/1_AmlNb2N8Us1xICsTeGDLKIqL6T-oHoRLZ323MG_uy8)
+
+
+#### Experimental: Stripe Compactions
+
+Stripe compactions is an experimental feature added in HBase 0.98 which aims to improve compactions for large regions or non-uniformly distributed row keys. In order to achieve smaller and/or more granular compactions, the StoreFiles within a region are maintained separately for several row-key sub-ranges, or "stripes", of the region. The stripes are transparent to the rest of HBase, so other operations on the HFiles or data work without modification.
+
+Stripe compactions change the HFile layout, creating sub-regions within regions. These sub-regions are easier to compact, and should result in fewer major compactions. This approach alleviates some of the challenges of larger regions.
+
+Stripe compaction is fully compatible with [Compaction](/docs/architecture/regions#compaction) and works in conjunction with either the ExploringCompactionPolicy or RatioBasedCompactionPolicy. It can be enabled for existing tables, and the table will continue to operate normally if it is disabled later.
+
+#### When To Use Stripe Compactions
+
+Consider using stripe compaction if you have either of the following:
+
+- Large regions. You get the positive effects of smaller regions without the additional MemStore and region management overhead.
+- Non-uniform keys, such as a time dimension in a key. Only the stripes receiving the new keys will need to compact. Old data will not compact as often, if at all.
+
+**Performance Improvements**
+Performance testing has shown that the performance of reads improves somewhat, and variability of performance of reads and writes is greatly reduced. An overall long-term performance improvement is seen on large non-uniform-row key regions, such as a hash-prefixed timestamp key. These performance gains are the most dramatic on a table which is already large. It is possible that the performance improvement might extend to region splits.
+
+#### Enabling Stripe Compaction
+
+You can enable stripe compaction for a table or a column family, by setting its `hbase.hstore.engine.class` to `org.apache.hadoop.hbase.regionserver.StripeStoreEngine`. You also need to set the `hbase.hstore.blockingStoreFiles` to a high number, such as 100 (rather than the default value of 10).
+
+**Procedure: Enable Stripe Compaction**
+
+
+
+
+
+Run one of following commands in the HBase shell. Replace the table name `orders_table` with the name of your table.
+
+```ruby
+alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.StripeStoreEngine', 'hbase.hstore.blockingStoreFiles' => '100'}
+alter 'orders_table', {NAME => 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.StripeStoreEngine', 'hbase.hstore.blockingStoreFiles' => '100'}}
+create 'orders_table', 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.StripeStoreEngine', 'hbase.hstore.blockingStoreFiles' => '100'}
+```
+
+
+
+
+
+Configure other options if needed. See [Configuring Stripe Compaction](/docs/architecture/regions#configuring-stripe-compaction) for more information.
+
+
+
+
+
+Enable the table.
+
+
+
+
+
+**Procedure: Disable Stripe Compaction**
+
+
+
+
+
+Set the `hbase.hstore.engine.class` option to either nil or `org.apache.hadoop.hbase.regionserver.DefaultStoreEngine`. Either option has the same effect.
+
+```ruby
+alter 'orders_table', CONFIGURATION => {'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DefaultStoreEngine'}
+```
+
+
+
+
+
+Enable the table.
+
+
+
+
+
+When you enable a large table after changing the store engine either way, a major compaction will likely be performed on most regions. This is not necessary on new tables.
+
+#### Configuring Stripe Compaction
+
+Each of the settings for stripe compaction should be configured at the table or column family level. If you use HBase shell, the general command pattern is as follows:
+
+```ruby
+alter 'orders_table', CONFIGURATION => {'key' => 'value', ..., 'key' => 'value'}
+```
+
+**Region and stripe sizing**
+You can configure your stripe sizing based upon your region sizing. By default, your new regions will start with one stripe. On the next compaction after the stripe has grown too large (16 x MemStore flush size), it is split into two stripes. Stripe splitting continues as the region grows, until the region is large enough to split.
+
+You can improve this pattern for your own data. A good rule is to aim for a stripe size of at least 1 GB, and about 8-12 stripes for uniform row keys. For example, if your regions are 30 GB, 12 x 2.5 GB stripes might be a good starting point.
+
+**Stripe Sizing Settings**
+
+| Setting | Notes |
+| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `hbase.store.stripe.initialStripeCount` | The number of stripes to create when stripe compaction is enabled. You can use it as follows: - For relatively uniform row keys, if you know the approximate target number of stripes from the above, you can avoid some splitting overhead by starting with several stripes (2, 5, 10...). If the early data is not representative of overall row key distribution, this will not be as efficient. - For existing tables with a large amount of data, this setting will effectively pre-split your stripes. - For keys such as hash-prefixed sequential keys, with more than one hash prefix per region, pre-splitting may make sense. |
+| `hbase.store.stripe.sizeToSplit` | The maximum size a stripe grows before splitting. Use this in conjunction with `hbase.store.stripe.splitPartCount` to control the target stripe size (`sizeToSplit = splitPartsCount * target stripe size`), according to the above sizing considerations. |
+| `hbase.store.stripe.splitPartCount` | The number of new stripes to create when splitting a stripe. The default is 2, which is appropriate for most cases. For non-uniform row keys, you can experiment with increasing the number to 3 or 4, to isolate the arriving updates into narrower slice of the region without additional splits being required. |
+
+**MemStore Size Settings**
+By default, the flush creates several files from one MemStore, according to existing stripe boundaries and row keys to flush. This approach minimizes write amplification, but can be undesirable if the MemStore is small and there are many stripes, because the files will be too small.
+In this type of situation, you can set `hbase.store.stripe.compaction.flushToL0` to `true`. This will cause a MemStore flush to create a single file instead. When at least `hbase.store.stripe.compaction.minFilesL0` such files (by default, 4) accumulate, they will be compacted into striped files.
+
+**Normal Compaction Configuration and Stripe Compaction**
+All the settings that apply to normal compactions (see [Parameters Used by Compaction Algorithm](/docs/architecture/regions#parameters-used-by-compaction-algorithm)) apply to stripe compactions. The exceptions are the minimum and maximum number of files, which are set to higher values by default because the files in stripes are smaller. To control these for stripe compactions, use `hbase.store.stripe.compaction.minFiles` and `hbase.store.stripe.compaction.maxFiles`, rather than `hbase.hstore.compaction.min` and `hbase.hstore.compaction.max`.
+
+#### FIFO Compaction
+
+The FIFO compaction policy selects only files which have all cells expired. The column family **MUST** have a non-default TTL. Essentially, the FIFO compactor only collects expired store files.
+
+Because we don't do any real compaction, we do not use CPU and IO (disk and network), and we do not evict hot data from the block cache. As a result, both read/write throughput and latency can be improved.
+
+#### When To Use FIFO Compaction
+
+Consider using FIFO Compaction when your use case is:
+
+- Very high volume raw data that has a low TTL and is the source for other data (after additional processing).
+- Data which can be kept entirely in the block cache (RAM/SSD). No need to compact the raw data at all.
+
+Do not use FIFO compaction when:
+
+- Table/ColumnFamily has MIN_VERSION \> 0
+- Table/ColumnFamily has TTL = FOREVER (HColumnDescriptor.DEFAULT_TTL)
+
+#### Enabling FIFO Compaction
+
+For Table:
+
+```java
+HTableDescriptor desc = new HTableDescriptor(tableName);
+desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+ FIFOCompactionPolicy.class.getName());
+```
+
+For Column Family:
+
+```java
+HColumnDescriptor desc = new HColumnDescriptor(family);
+desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+ FIFOCompactionPolicy.class.getName());
+```
+
+From HBase Shell:
+
+```ruby
+create 'x',{NAME=>'y', TTL=>'30'}, {CONFIGURATION => {'hbase.hstore.defaultengine.compactionpolicy.class' => 'org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy', 'hbase.hstore.blockingStoreFiles' => 1000}}
+```
+
+Although region splitting is still supported, for optimal performance it should be disabled, either by setting `DisabledRegionSplitPolicy` explicitly or by setting `ConstantSizeRegionSplitPolicy` with a very large max region size. You will also have to increase the store's blocking file count (`hbase.hstore.blockingStoreFiles`) to a very large number. There is a sanity check on the table/column family configuration when FIFO compaction is in use, and the minimum value for the number of blocking files is 1000.
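+
+Putting these pieces together, the following is a hedged Java sketch, mirroring the descriptor style used above, of a FIFO-compacted table with a 30-second TTL, a raised blocking-file count, and splitting disabled. The table and family names are hypothetical, and the sketch assumes the `HTableDescriptor#setRegionSplitPolicyClassName` setter.
+
+```java
+HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("raw_events"));
+// Use the FIFO compaction policy and raise the blocking StoreFile count.
+desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+    FIFOCompactionPolicy.class.getName());
+desc.setConfiguration("hbase.hstore.blockingStoreFiles", "1000");
+// Disable region splitting for this table.
+desc.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
+
+// FIFO compaction requires a non-default TTL on the column family.
+HColumnDescriptor family = new HColumnDescriptor("y");
+family.setTimeToLive(30); // seconds
+desc.addFamily(family);
+```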
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/regionserver.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/regionserver.mdx
new file mode 100644
index 000000000000..5a8231bb66f5
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/regionserver.mdx
@@ -0,0 +1,662 @@
+---
+title: "RegionServer"
+description: "HBase RegionServer implementation, interfaces, read/write paths, block cache, memstore management, and performance tuning."
+---
+
+`HRegionServer` is the RegionServer implementation. It is responsible for serving and managing regions. In a distributed cluster, a RegionServer runs on a [DataNode](/docs/architecture/hdfs#hdfs-datanode).
+
+## Interface [#architecture-regionserver-interface]
+
+The methods exposed by `HRegionInterface` contain both data-oriented and region-maintenance methods:
+
+- Data (get, put, delete, next, etc.)
+- Region (splitRegion, compactRegion, etc.) For example, when the `Admin` method `majorCompact` is invoked on a table, the client is actually iterating through all regions for the specified table and requesting a major compaction directly to each region.
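+
+For example, a major compaction of a whole table can be requested through `Admin`; under the covers the client iterates the table's regions and asks each one to compact, as described above. A minimal sketch follows; the table name is hypothetical.
+
+```java
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+
+public class MajorCompactExample {
+  public static void main(String[] args) throws IOException {
+    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = conn.getAdmin()) {
+      // Asynchronously requests a major compaction of every region of the table.
+      admin.majorCompact(TableName.valueOf("my_table"));
+    }
+  }
+}
+```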
+
+## Processes [#architecture-regionserver-processes]
+
+The RegionServer runs a variety of background threads:
+
+### CompactSplitThread
+
+Checks for splits and handles minor compactions.
+
+### MajorCompactionChecker
+
+Checks for major compactions.
+
+### MemStoreFlusher
+
+Periodically flushes in-memory writes in the MemStore to StoreFiles.
+
+### LogRoller
+
+Periodically checks the RegionServer's WAL.
+
+## Coprocessors
+
+Coprocessors were added in 0.92. There is a thorough [Blog Overview of CoProcessors](https://blogs.apache.org/hbase/entry/coprocessor_introduction) posted. Documentation will eventually move to this reference guide, but the blog is the most current information available at this time.
+
+## Block Cache [#architecture-regionserver-block-cache]
+
+HBase provides two different BlockCache implementations to cache data read from HDFS: the default on-heap `LruBlockCache` and the `BucketCache`, which is (usually) off-heap. This section discusses benefits and drawbacks of each implementation, how to choose the appropriate option, and configuration options for each.
+
+
+ See the RegionServer UI for details on the caching deployment: configurations, sizings, current usage,
+ time-in-the-cache, and even details on block counts and types.
+
+
+### Cache Choices
+
+`LruBlockCache` is the original implementation, and is entirely within the Java heap. `BucketCache` is optional and mainly intended for keeping block cache data off-heap, although `BucketCache` can also be a file-backed cache. When file-backed, it can be used in either file mode or mmapped mode. There is also a pmem mode, where the bucket cache resides on a persistent memory device.
+
+When you enable BucketCache, you are enabling a two tier caching system. We used to describe the tiers as "L1" and "L2" but have deprecated this terminology as of hbase-2.0.0. The "L1" cache referred to an instance of LruBlockCache and "L2" to an off-heap BucketCache. Instead, when BucketCache is enabled, all DATA blocks are kept in the BucketCache tier and meta blocks — INDEX and BLOOM blocks — are on-heap in the `LruBlockCache`. Management of these two tiers and the policy that dictates how blocks move between them is done by `CombinedBlockCache`.
+
+### General Cache Configurations
+
+Apart from the cache implementation itself, you can set some general configuration options to control how the cache performs. See [CacheConfig](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html). After setting any of these options, restart or rolling restart your cluster for the configuration to take effect. Check logs for errors or unexpected behavior.
+
+See also [Prefetch Option for Blockcache](/docs/performance#prefetch-option-for-blockcache), which discusses a new option introduced in [HBASE-9857](https://issues.apache.org/jira/browse/HBASE-9857).
+
+### LruBlockCache Design
+
+The LruBlockCache is an LRU cache that contains three levels of block priority to allow for scan-resistance and in-memory ColumnFamilies:
+
+- Single access priority: The first time a block is loaded from HDFS it normally has this priority and it will be part of the first group to be considered during evictions. The advantage is that scanned blocks are more likely to get evicted than blocks that are getting more usage.
+- Multi access priority: If a block in the previous priority group is accessed again, it upgrades to this priority. It is thus part of the second group considered during evictions.
+- In-memory access priority: If the block's family was configured to be "in-memory", it will be part of this priority disregarding the number of times it was accessed. Catalog tables are configured like this. This group is the last one considered during evictions.
+ To mark a column family as in-memory, call
+ ```java
+ HColumnDescriptor.setInMemory(true);
+ ```
+ if creating a table from Java, or set `IN_MEMORY ⇒ true` when creating or altering a table in the shell: e.g.
+ ```ruby
+ hbase(main):003:0> create 't', {NAME => 'f', IN_MEMORY => 'true'}
+ ```
+ For more information, see the LruBlockCache source
+
+### LruBlockCache Usage
+
+Block caching is enabled by default for all the user tables which means that any read operation will load the LRU cache. This might be good for a large number of use cases, but further tunings are usually required in order to achieve better performance. An important concept is the [working set size](http://en.wikipedia.org/wiki/Working_set_size), or WSS, which is: "the amount of memory needed to compute the answer to a problem". For a website, this would be the data that's needed to answer the queries over a short amount of time.
+
+The way to calculate how much memory is available in HBase for caching is:
+
+```
+number of region servers * heap size * hfile.block.cache.size * 0.99
+```
+
+The default value for the block cache is 0.4, which represents 40% of the available heap. The last value (99%) is the default acceptable loading factor in the LRU cache, after which eviction is started. The reason it is included in this equation is that it would be unrealistic to use 100% of the available memory, since that would make the process block at the point where it loads new blocks. Here are some examples:
+
+- One region server with the heap size set to 1 GB and the default block cache size will have 405 MB of block cache available.
+- 20 region servers with the heap size set to 8 GB and a default block cache size will have 63.3 GB of block cache.
+- 100 region servers with the heap size set to 24 GB and a block cache size of 0.5 will have about 1.16 TB of block cache.
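+
+A small sketch that reproduces the arithmetic behind these examples:
+
+```java
+public class BlockCacheSizing {
+
+  // number of region servers * heap size * hfile.block.cache.size * 0.99
+  static double blockCacheGb(int regionServers, double heapGb, double blockCacheFraction) {
+    return regionServers * heapGb * blockCacheFraction * 0.99;
+  }
+
+  public static void main(String[] args) {
+    System.out.printf("1 RS,   1 GB heap, 0.4 -> %.2f GB (~405 MB)%n", blockCacheGb(1, 1, 0.4));
+    System.out.printf("20 RS,  8 GB heap, 0.4 -> %.1f GB%n", blockCacheGb(20, 8, 0.4));
+    System.out.printf("100 RS, 24 GB heap, 0.5 -> %.0f GB (~1.16 TB)%n", blockCacheGb(100, 24, 0.5));
+  }
+}
+```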
+
+Your data is not the only resident of the block cache. Here are others that you may have to take into account:
+
+- **Catalog Tables**
+ The `hbase:meta` table is forced into the block cache and has the in-memory priority, which means that it is harder to evict.
+
+
+ The hbase:meta tables can occupy a few MBs depending on the number of regions.
+
+
+- **HFiles Indexes**
+ An _HFile_ is the file format that HBase uses to store data in HDFS. It contains a multi-layered index which allows HBase to seek the data without having to read the whole file. The size of those indexes is a factor of the block size (64KB by default), the size of your keys and the amount of data you are storing. For big data sets it's not unusual to see numbers around 1GB per region server, although not all of it will be in cache because the LRU will evict indexes that aren't used.
+
+- **Keys**
+ The values that are stored are only half the picture, since each value is stored along with its keys (row key, family qualifier, and timestamp). See [Try to minimize row and column sizes](/docs/regionserver-sizing#try-to-minimize-row-and-column-sizes).
+
+- **Bloom Filters**
+ Just like the HFile indexes, those data structures (when enabled) are stored in the LRU.
+
+Currently the recommended way to measure HFile indexes and bloom filters sizes is to look at the region server web UI and checkout the relevant metrics. For keys, sampling can be done by using the HFile command line tool and look for the average key size metric. Since HBase 0.98.3, you can view details on BlockCache stats and metrics in a special Block Cache section in the UI. As of HBase 2.4.14, you can estimate HFile indexes and bloom filters vs other DATA blocks using blockCacheCount and blockCacheDataBlockCount in JMX. The formula `(blockCacheCount - blockCacheDataBlockCount) * blockSize` will give you an estimate which can be useful when trying to enable the BucketCache. You should make sure the post-BucketCache config gives enough memory to the on-heap LRU cache to hold at least the same number of non-DATA blocks from pre-BucketCache. Once BucketCache is enabled, the L1 metrics like l1CacheSize, l1CacheCount, and l1CacheEvictionCount can help you further tune the size.
+
+It's generally bad to use block caching when the WSS doesn't fit in memory. This is the case when you have for example 40GB available across all your region servers' block caches but you need to process 1TB of data. One of the reasons is that the churn generated by the evictions will trigger more garbage collections unnecessarily. Here are two use cases:
+
+- Fully random reading pattern: This is a case where you almost never access the same row twice within a short amount of time, such that the chance of hitting a cached block is close to 0. Setting block caching on such a table is a waste of memory and CPU cycles; worse, it will generate more garbage for the JVM to pick up. For more information on monitoring GC, see [JVM Garbage Collection Logs](/docs/troubleshooting#jvm-garbage-collection-logs).
+- Mapping a table: In a typical MapReduce job that takes a table as input, every row will be read only once, so there's no need to put them into the block cache. The Scan object has the option of turning this off via the setCacheBlocks method (set it to false), as shown in the sketch below. You can still keep block caching turned on for this table if you need fast random read access. An example would be counting the number of rows in a table that serves live traffic; caching every block of that table would create massive churn and would surely evict data that's currently in use.
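+
+Below is a minimal sketch of the second use case, assuming a hypothetical table named `my_table`: a one-off full scan that counts rows without polluting the block cache. It is illustrative only; adapt the table name and connection handling to your own code.
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+
+public class UncachedFullScan {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    try (Connection connection = ConnectionFactory.createConnection(conf);
+         Table table = connection.getTable(TableName.valueOf("my_table"))) { // hypothetical table
+      Scan scan = new Scan();
+      scan.setCacheBlocks(false); // do not let this one-off scan churn the block cache
+      long rows = 0;
+      try (ResultScanner scanner = table.getScanner(scan)) {
+        for (Result result : scanner) {
+          rows++;
+        }
+      }
+      System.out.println("rows: " + rows);
+    }
+  }
+}
+```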
+
+#### Caching META blocks only (DATA blocks in fscache)
+
+An interesting setup is one where we cache META blocks only and read DATA blocks in on each access. If the DATA blocks fit inside fscache, this alternative may make sense when access is completely random across a very large dataset. To enable this setup, alter your table and, for each column family, set `BLOCKCACHE => 'false'`. You are 'disabling' the BlockCache for this column family only. You can never disable the caching of META blocks. Since [HBASE-4683 Always cache index and bloom blocks](https://issues.apache.org/jira/browse/HBASE-4683), we will cache META blocks even if the BlockCache is disabled.
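+
+This per-family change can also be made through the Java Admin API. The sketch below is illustrative only (a hypothetical table `my_table` and family `cf1`); it rebuilds the existing column family descriptor with the block cache disabled so other family settings are preserved:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class DisableDataBlockCaching {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    try (Connection connection = ConnectionFactory.createConnection(conf);
+         Admin admin = connection.getAdmin()) {
+      TableName table = TableName.valueOf("my_table"); // hypothetical table name
+      // Start from the current descriptor so other family settings are kept.
+      ColumnFamilyDescriptor existing =
+          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("cf1"));
+      ColumnFamilyDescriptor updated = ColumnFamilyDescriptorBuilder.newBuilder(existing)
+          .setBlockCacheEnabled(false) // equivalent to BLOCKCACHE => 'false' in the shell
+          .build();
+      admin.modifyColumnFamily(table, updated);
+    }
+  }
+}
+```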
+
+### Off-heap Block Cache
+
+#### How to Enable BucketCache
+
+The usual deployment of BucketCache is via a managing class that sets up two caching tiers: an on-heap cache implemented by LruBlockCache and a second cache implemented with BucketCache. The managing class is [CombinedBlockCache](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.html) by default. The previous link describes the caching 'policy' implemented by CombinedBlockCache. In short, it works by keeping meta blocks (INDEX and BLOOM) in the on-heap LruBlockCache tier, while DATA blocks are kept in the BucketCache tier.
+
+- **Pre-hbase-2.0.0 versions**
+
+  Fetching will always be slower when fetching from BucketCache in pre-hbase-2.0.0, as compared to the native on-heap LruBlockCache. However, latencies tend to be less erratic across time, because there is less garbage collection when you use BucketCache since it is managing BlockCache allocations, not the GC. If the BucketCache is deployed in off-heap mode, this memory is not managed by the GC at all. This is why you'd use BucketCache in pre-2.0.0: so your latencies are less erratic, to mitigate GCs and heap fragmentation, and so you can safely use more memory. See Nick Dimiduk's [BlockCache 101](http://www.n10k.com/blog/blockcache-101/) for comparisons running on-heap vs off-heap tests. Also see [Comparing BlockCache Deploys](https://web.archive.org/web/20231109025243/http://people.apache.org/~stack/bc/), which finds that if your dataset fits inside your LruBlockCache deploy, you should use it; otherwise, if you are experiencing cache churn (or you want your cache to exist beyond the vagaries of Java GC), use BucketCache.
+
+  In pre-2.0.0, one can configure the BucketCache so it receives the `victim` of an LruBlockCache eviction. All DATA and INDEX blocks are cached in L1 first. When eviction happens from L1, the blocks (or `victims`) will get moved to L2. Set `cacheDataInL1` via `HColumnDescriptor.setCacheDataInL1(true)` or, in the shell, when creating or amending column families, set `CACHE_DATA_IN_L1` to true: e.g.
+
+ ```java
+ hbase(main):003:0> create 't', {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}
+ ```
+
+- **hbase-2.0.0+ versions**
+
+ HBASE-11425 changed the HBase read path so it could hold the read-data off-heap avoiding copying of cached data on to the java heap. See [Offheap read-path](/docs/offheap-read-write#offheap-read-path). In hbase-2.0.0, off-heap latencies approach those of on-heap cache latencies with the added benefit of NOT provoking GC.
+
+ From HBase 2.0.0 onwards, the notions of L1 and L2 have been deprecated. When BucketCache is turned on, the DATA blocks will always go to BucketCache and INDEX/BLOOM blocks go to on heap LRUBlockCache. `cacheDataInL1` support has been removed.
+
+#### BucketCache Deploy Modes
+
+The BucketCache Block Cache can be deployed in _offheap_, _file_, or _mmapped_ file mode.
+
+You set which via the `hbase.bucketcache.ioengine` setting. Setting it to `offheap` will have BucketCache make its allocations off-heap, and an ioengine setting of `file:PATH_TO_FILE` will direct BucketCache to use file caching (useful in particular if you have some fast I/O attached to the box, such as SSDs). From 2.0.0, it is possible to have more than one file backing the BucketCache. This is very useful when the cache size requirement is high. For multiple backing files, configure the ioengine as `files:PATH_TO_FILE1,PATH_TO_FILE2,PATH_TO_FILE3`. BucketCache can also be configured to use an mmapped file; configure the ioengine as `mmap:PATH_TO_FILE` for this.
+
+It is possible to deploy a tiered setup where we bypass the CombinedBlockCache policy and have BucketCache working as a strict L2 cache to the L1 LruBlockCache. For such a setup, set `hbase.bucketcache.combinedcache.enabled` to `false`. In this mode, on eviction from L1, blocks go to L2. When a block is cached, it is cached first in L1. When we go to look for a cached block, we look first in L1 and if none found, then search L2. Let us call this deploy format, _Raw L1+L2_. NOTE: This L1+L2 mode is removed from 2.0.0. When BucketCache is used, it will be strictly the DATA cache and the LruBlockCache will cache INDEX/META blocks.
+
+Other BucketCache configs include: specifying a location to persist cache to across restarts, how many threads to use writing the cache, etc. See the [CacheConfig.html](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/hfile/CacheConfig.html) class for configuration options and descriptions.
+
+To check that it is enabled, look for the log line describing cache setup; it will detail how BucketCache has been deployed. Also see the UI, which details the cache tiering and its configuration.
+
+#### BucketCache Example Configuration
+
+This sample provides a configuration for a 4 GB off-heap BucketCache with a 1 GB on-heap cache.
+
+Configuration is performed on the RegionServer.
+
+Setting `hbase.bucketcache.ioengine` and `hbase.bucketcache.size` \> 0 enables `CombinedBlockCache`. Let us presume that the RegionServer has been set to run with a 5G heap: i.e. `HBASE_HEAPSIZE=5g`.
+
+1. First, edit the RegionServer's _hbase-env.sh_ and set `HBASE_OFFHEAPSIZE` to a value greater than the off-heap size wanted, in this case, 4 GB (expressed as 4G). Let's set it to 5G. That'll be 4G for our off-heap cache and 1G for any other uses of off-heap memory (there are other users of off-heap memory other than BlockCache; e.g. DFSClient in RegionServer can make use of off-heap memory). See Direct Memory Usage In HBase below.
+
+ ```java
+ HBASE_OFFHEAPSIZE=5G
+ ```
+
+2. Next, add the following configuration to the RegionServer's _hbase-site.xml_.
+
+ ```xml
+ <property>
+   <name>hbase.bucketcache.ioengine</name>
+   <value>offheap</value>
+ </property>
+ <property>
+   <name>hfile.block.cache.size</name>
+   <value>0.2</value>
+ </property>
+ <property>
+   <name>hbase.bucketcache.size</name>
+   <value>4196</value>
+ </property>
+ ```
+
+3. Restart or rolling restart your cluster, and check the logs for any issues.
+
+In the above, we set the BucketCache to be 4G. We configured the on-heap LruBlockCache to have 20% (0.2) of the RegionServer's heap size (0.2 \* 5G = 1G). In other words, you configure the L1 LruBlockCache as you would normally (as if there were no L2 cache present).
+
+[HBASE-10641](https://issues.apache.org/jira/browse/HBASE-10641) introduced the ability to configure multiple sizes for the buckets of the BucketCache, in HBase 0.98 and newer. To configure multiple bucket sizes, set the new property `hbase.bucketcache.bucket.sizes` to a comma-separated list of block sizes, ordered from smallest to largest, with no spaces. The goal is to optimize the bucket sizes based on your data access patterns. The following example configures buckets of size 4096 and 8192.
+
+```xml
+<property>
+  <name>hbase.bucketcache.bucket.sizes</name>
+  <value>4096,8192</value>
+</property>
+```
+
+#### Direct Memory Usage In HBase
+
+The default maximum direct memory varies by JVM. Traditionally it is 64M, some relation to the allocated heap size (`-Xmx`), or no limit at all (JDK7 apparently). HBase servers use direct memory; in particular, with short-circuit reading (see [Leveraging local data](/docs/performance#leveraging-local-data)), the hosted DFSClient will allocate direct memory buffers. How much the DFSClient uses is not easy to quantify; it is the number of open HFiles \* `hbase.dfs.client.read.shortcircuit.buffer.size`, where `hbase.dfs.client.read.shortcircuit.buffer.size` is set to 128k in HBase (see the *hbase-default.xml* default configurations). If you do off-heap block caching, you'll be making use of direct memory. The RPCServer uses a ByteBuffer pool. From 2.0.0, these buffers are off-heap ByteBuffers. Starting your JVM, make sure the `-XX:MaxDirectMemorySize` setting in *conf/hbase-env.sh* considers the off-heap BlockCache (`hbase.bucketcache.size`), DFSClient usage, and the RPC-side ByteBufferPool max size. This has to be a bit higher than the sum of the off-heap BlockCache size and the max ByteBufferPool size. Allocating an extra 1-2 GB for the max direct memory size has worked in tests. Direct memory, which is part of the Java process heap, is separate from the object heap allocated by `-Xmx`. The value allocated by `MaxDirectMemorySize` must not exceed physical RAM, and is likely to be less than the total available RAM due to other memory requirements and system constraints.
+
+You can see how much memory (on-heap and off-heap/direct) a RegionServer is configured to use, and how much it is using at any one time, by looking at the _Server Metrics: Memory_ tab in the UI. It can also be gotten via JMX; in particular, the direct memory currently used by the server can be found on the `java.nio:type=BufferPool,name=direct` bean. Terracotta has a [good write up](https://web.archive.org/web/20170907032911/http://terracotta.org/documentation/4.0/bigmemorygo/configuration/storage-options) on using off-heap memory in Java. It is for their product BigMemory, but a lot of the issues noted apply in general to any attempt at going off-heap. Check it out.
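+
+If you want to sample the direct buffer pool from inside a JVM yourself (for example, in a small diagnostic tool), the standard `java.lang.management` beans expose the same figures as the MBean named above. A minimal sketch:
+
+```java
+import java.lang.management.BufferPoolMXBean;
+import java.lang.management.ManagementFactory;
+
+public class DirectMemoryUsage {
+  public static void main(String[] args) {
+    // Print the figures behind the java.nio:type=BufferPool,name=direct MBean.
+    for (BufferPoolMXBean pool : ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class)) {
+      if ("direct".equals(pool.getName())) {
+        System.out.printf("direct buffers: count=%d, used=%d bytes, capacity=%d bytes%n",
+            pool.getCount(), pool.getMemoryUsed(), pool.getTotalCapacity());
+      }
+    }
+  }
+}
+```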
+
+
+
+#### hbase.bucketcache.percentage.in.combinedcache
+
+This is a pre-HBase 1.0 configuration removed because it was confusing. It was a float that you would set to some value between 0.0 and 1.0. Its default was 0.9. If the deploy was using CombinedBlockCache, then the LruBlockCache L1 size was calculated to be `(1 - hbase.bucketcache.percentage.in.combinedcache) * size-of-bucketcache` and the BucketCache size was `hbase.bucketcache.percentage.in.combinedcache * size-of-bucket-cache`, where size-of-bucket-cache itself is either the value of the configuration `hbase.bucketcache.size` if it was specified in megabytes, or `hbase.bucketcache.size` \* `-XX:MaxDirectMemorySize` if `hbase.bucketcache.size` is between 0 and 1.0.
+
+In 1.0, it is more straightforward. The on-heap LruBlockCache size is set as a fraction of the Java heap using the `hfile.block.cache.size` setting (not the best name) and BucketCache is set as above in absolute megabytes.
+
+
+
+### Time Based Priority for BucketCache
+
+[HBASE-28463](https://issues.apache.org/jira/browse/HBASE-28463) introduced time based priority for blocks in BucketCache. It allows for defining an age threshold in an individual column family's configuration, whereby blocks older than this configured threshold are targeted first for eviction.
+
+Blocks from column families that don't define the age threshold wouldn't be evaluated by the time based priority, and would only be evicted following the LRU eviction logic.
+
+This feature is mostly useful for use cases where the most recent data is more frequently accessed, and therefore should get higher priority in the cache. Configuring Time Based Priority with the "age" of the most accessed data then gives finer control over block allocation in the BucketCache than the built-in LRU eviction logic.
+
+Time Based Priority for BucketCache provides three different strategies for defining data age:
+
+- Cell timestamps: Uses the timestamp portion of HBase cells for comparing the data age.
+- Custom cell qualifiers: Uses a custom-defined date qualifier for comparing the data age. It uses that value to tier the entire row containing the given qualifier value. This requires that the custom qualifier be a valid Java long timestamp.
+- Custom value provider: Allows for defining a pluggable implementation that contains the logic for identifying the date value to be used for comparison. This also provides additional flexibility for different use cases that might have the date stored in other formats or embedded with other data in various portions of a given row.
+
+For use cases where priority is determined by the order of record ingestion in HBase (with the most recent being the most relevant), the built-in cell timestamp offers the most convenient and efficient method for configuring age-based priority. See [Using Cell timestamps for Time Based Priority](/docs/architecture/regionserver#using-cell-timestamps-for-time-based-priority).
+
+Some applications may utilize a custom date column to define the priority of table records. In such instances, a custom cell qualifier-based priority is advisable. See [Using Custom Cell Qualifiers for Time Based Priority](/docs/architecture/regionserver#using-custom-cell-qualifiers-for-time-based-priority).
+
+Finally, more intricate schemas may incorporate domain-specific logic for defining the age of each record. The custom value provider facilitates the integration of custom code to implement the appropriate parsing of the date value that should be used for the priority comparison. See [Using a Custom value provider for Time Based Priority](/docs/architecture/regionserver#using-a-custom-value-provider-for-time-based-priority).
+
+With Time Based Priority for BucketCache, block age is evaluated when deciding if a block should be cached (i.e. during reads, writes, compaction and prefetch), as well as during the cache freeSpace run (mass eviction), prior to executing the LRU logic.
+
+Because blocks don't hold any specific meta information other than type, it's necessary to group blocks of the same "age group" on separate files, using specialized compaction implementations (see more details in the configuration section below). The time range of all blocks in each file is then appended at the file meta info section, and is used for evaluating the age of blocks that should be considered in the Time Based Priority logic.
+
+#### Configuring Time Based Priority for BucketCache
+
+Finding the age of each block involves extra overhead, so the feature is disabled by default at the global configuration level.
+
+To enable it, the following configuration should be set on RegionServers' _hbase-site.xml_:
+
+```xml
+<property>
+  <name>hbase.regionserver.datatiering.enable</name>
+  <value>true</value>
+</property>
+```
+
+Once enabled globally, it's necessary to define the desired strategy-specific settings at the individual column family level.
+
+#### Using Cell timestamps for Time Based Priority
+
+This strategy is the most efficient to run, as it uses the timestamp portion of each cell containing the data for comparing the age of blocks. It requires DateTieredCompaction for splitting the blocks into separate files according to blocks' ages.
+
+The example below sets the hot age threshold to one week (in milliseconds) for the column family 'cf1' in table 'orders':
+
+```java
+hbase(main):003:0> alter 'orders', {NAME => 'cf1',
+ CONFIGURATION => {'hbase.hstore.datatiering.type' => 'TIME_RANGE',
+ 'hbase.hstore.datatiering.hot.age.millis' => '604800000',
+ 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine',
+ 'hbase.hstore.blockingStoreFiles' => '60',
+ 'hbase.hstore.compaction.min' => '2',
+ 'hbase.hstore.compaction.max' => '60'
+ }
+}
+```
+
+
+In the example above, the properties governing the number of windows and the period of each window in the date tiered compaction were not set. With the default settings, the compaction will initially create four windows of six hours, then four windows of one day each, then another four windows of four days each, and so on until the minimum timestamp among the selected files is covered. This can create a large number of files; therefore, additional changes to 'hbase.hstore.blockingStoreFiles', 'hbase.hstore.compaction.min' and 'hbase.hstore.compaction.max' are recommended.
+
+Alternatively, consider adjusting the initial window size to the same as the hot age threshold, and two windows only per tier:
+
+```java
+hbase(main):003:0> alter 'orders', {NAME => 'cf1',
+ CONFIGURATION => {'hbase.hstore.datatiering.type' => 'TIME_RANGE',
+ 'hbase.hstore.datatiering.hot.age.millis' => '604800000',
+ 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine',
+ 'hbase.hstore.compaction.date.tiered.base.window.millis' => '604800000',
+ 'hbase.hstore.compaction.date.tiered.windows.per.tier' => '2'
+ }
+}
+```
+
+
+
+#### Using Custom Cell Qualifiers for Time Based Priority
+
+This strategy uses a new compaction implementation designed for Time Based Priority. It extends date tiered compaction, but instead of producing multiple tiers of various time windows, it simply splits files into two groups: the "cold" group, where all blocks are older than the defined threshold age, and the "hot" group, where all blocks are newer than the threshold age.
+
+The example below defines a cell qualifier 'event_date' to be used for comparing the age of blocks within the custom cell qualifier strategy:
+
+```java
+hbase(main):003:0> alter 'orders', {NAME => 'cf1',
+ CONFIGURATION => {'hbase.hstore.datatiering.type' => 'CUSTOM',
+ 'TIERING_CELL_QUALIFIER' => 'event_date',
+ 'hbase.hstore.datatiering.hot.age.millis' => '604800000',
+ 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine',
+ 'hbase.hstore.compaction.date.tiered.custom.age.limit.millis' => '604800000'
+ }
+}
+```
+
+
+ Note that there are two different configurations for defining the hot age threshold. This is
+ because the Time Based Priority enforcer operates independently of the compaction implementation.
+
+
+#### Using a Custom value provider for Time Based Priority
+
+It's also possible to hook in domain-specific logic for defining the data age of each row to be used for comparing blocks priorities. The Custom Time Based Priority framework defines the `CustomTieredCompactor.TieringValueProvider` interface, which can be implemented to provide the specific date value to be used by compaction for grouping the blocks according to the threshold age.
+
+In the following example, the `RowKeyPortionTieringValueProvider` implements the `getTieringValue` method. This method parses the date from a segment of the row key value, specifically between positions 14 and 29, using the "yyyyMMddHHmmss" format. The parsed date is then returned as a long timestamp, which is then used by custom tiered compaction to group the blocks based on the defined hot age threshold:
+
+```java
+public class RowKeyPortionTieringValueProvider implements CustomTieredCompactor.TieringValueProvider {
+
+  // Date format embedded in the row key portion used for tiering.
+  private SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
+
+  @Override
+  public void init(Configuration configuration) throws Exception {}
+
+  @Override
+  public long getTieringValue(Cell cell) {
+    // Copy the row key out of the cell's backing array.
+    byte[] rowArray = new byte[cell.getRowLength()];
+    System.arraycopy(cell.getRowArray(), cell.getRowOffset(), rowArray, 0, cell.getRowLength());
+    // Extract the date portion of the row key (positions 14 to 29) and parse it.
+    String datePortion = Bytes.toString(rowArray).substring(14, 29).trim();
+    try {
+      return sdf.parse(datePortion).getTime();
+    } catch (ParseException e) {
+      // Handle or log the parse error as appropriate for the application.
+    }
+    // Fall back to Long.MAX_VALUE when the date cannot be parsed.
+    return Long.MAX_VALUE;
+  }
+}
+```
+
+The Tiering Value Provider above can then be configured for Time Based Priority as follows:
+
+```java
+hbase(main):003:0> alter 'orders', {NAME => 'cf1',
+ CONFIGURATION => {'hbase.hstore.datatiering.type' => 'CUSTOM',
+ 'hbase.hstore.custom-tiering-value.provider.class' =>
+ 'org.apache.hbase.client.example.RowKeyPortionTieringValueProvider',
+ 'hbase.hstore.datatiering.hot.age.millis' => '604800000',
+ 'hbase.hstore.engine.class' => 'org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine',
+ 'hbase.hstore.compaction.date.tiered.custom.age.limit.millis' => '604800000'
+ }
+}
+```
+
+
+ Upon enabling Custom Time Based Priority (either the custom qualifier or custom value provider) in
+ the column family configuration, it is imperative that major compaction be executed twice on the
+ specified tables to ensure the effective application of the newly configured priorities within the
+ bucket cache.
+
+
+
+Time Based Priority was originally implemented with the cell timestamp strategy only. The original design covering cell timestamp based strategy is available [here](https://docs.google.com/document/d/1Qd3kvZodBDxHTFCIRtoePgMbvyuUSxeydi2SEWQFQro/edit?tab=t.0#heading=h.gjdgxs).
+
+The second phase including the two custom strategies mentioned above is detailed in [this separate design doc](https://docs.google.com/document/d/1uBGIO9IQ-FbSrE5dnUMRtQS23NbCbAmRVDkAOADcU_E/edit?tab=t.0).
+
+
+
+### Compressed BlockCache
+
+[HBASE-11331](https://issues.apache.org/jira/browse/HBASE-11331) introduced lazy BlockCache decompression, more simply referred to as compressed BlockCache. When compressed BlockCache is enabled, data and encoded data blocks are cached in the BlockCache in their on-disk format, rather than being decompressed and decrypted before caching.
+
+For a RegionServer hosting more data than can fit into cache, enabling this feature with SNAPPY compression has been shown to result in a 50% increase in throughput and a 30% improvement in mean latency, while increasing garbage collection by 80% and overall CPU load by 2%. See HBASE-11331 for more details about how performance was measured and achieved. For a RegionServer hosting data that can comfortably fit into cache, or if your workload is sensitive to extra CPU or garbage-collection load, you may receive less benefit.
+
+The compressed BlockCache is disabled by default. To enable it, set `hbase.block.data.cachecompressed` to `true` in _hbase-site.xml_ on all RegionServers.
+
+### Cache Aware Load Balancer
+
+Depending on the data size and the configured cache size, the cache warm up can take anywhere from a few minutes to a few hours. This becomes even more critical for HBase deployments over cloud storage, where compute is separated from storage. Doing this every time the region server starts can be a very expensive process. To eliminate this, [HBASE-27313](https://issues.apache.org/jira/browse/HBASE-27313) implemented the cache persistence feature, where the region servers periodically persist the blocks cached in the bucket cache. This persisted information is then used to resurrect the cache in the event of a region server restart, whether due to a normal restart or a crash.
+
+[HBASE-27999](https://issues.apache.org/jira/browse/HBASE-27999) implements the cache aware load balancer, which gives the load balancer the ability to consider the cache allocation of each region on region servers when calculating a new assignment plan. It uses the region/region server cache allocation information reported by region servers to calculate the percentage of HFiles cached for each region on the hosting server, and the balancer then uses this information as a factor when deciding on an optimal, new assignment plan.
+
+The master node captures the caching information from all the region servers and uses this information to decide on new region assignments while ensuring a minimal impact on the current cache allocation. A region is assigned to the region server where it has a better cache ratio as compared to the region server where it is currently hosted.
+
+The CacheAwareLoadBalancer uses two cost elements for deciding the region allocation. These are described below:
+
+1. **Cache Cost**
+   The cache cost is calculated as the percentage of a region's data cached on the region server where it is either currently hosted or was previously hosted. A region may have multiple HFiles, each of different sizes. An HFile is considered to be fully prefetched when all the data blocks in this file are in the cache. The region server hosting this region calculates the ratio of the number of fully cached HFiles to the total number of HFiles in the region. This ratio will vary from 0 (the region is hosted on this server, but none of its HFiles are cached) to 1 (the region is hosted on this server and all of its HFiles are cached).
+   Every region server maintains this information for all the regions currently hosted there. In addition, this cache ratio is also maintained for regions which were previously hosted on this region server, giving historical information about the regions.
+2. **Skewness Cost**
+
+The cache aware balancer will consider cache cost with the skewness cost to decide on the region assignment plan under following conditions:
+
+1. There is an idle server in the cluster. This can happen when an existing server is restarted or a new server is added to the cluster.
+2. When the cost of maintaining the balance in the cluster is greater than the minimum threshold defined by the configuration _hbase.master.balancer.stochastic.minCostNeedBalance_.
+
+The CacheAwareLoadBalancer can be enabled in the cluster by setting the following configuration properties in the master configuration:
+
+```xml
+<property>
+  <name>hbase.master.loadbalancer.class</name>
+  <value>org.apache.hadoop.hbase.master.balancer.CacheAwareLoadBalancer</value>
+</property>
+<property>
+  <name>hbase.bucketcache.persistent.path</name>
+  <value>/path/to/bucketcache_persistent_file</value>
+</property>
+```
+
+With [HBASE-29168](https://issues.apache.org/jira/browse/HBASE-29168), the CacheAwareLoadBalancer implements region move throttling. This mitigates the impact of "losing" the cache factor when balancing mainly due to region skewness, i.e. when new region servers are added to the cluster, a large bulk of cached regions may move to the new servers at once, which can cause noticeable read performance impacts for cache-sensitive use cases. The throttling sleep time is determined by the **hbase.master.balancer.move.throttlingMillis** property, and it defaults to 60000 millis. If a region planned to be moved has a cache ratio on the target server above the threshold configured by the **hbase.master.balancer.stochastic.throttling.cacheRatio** property (80% by default), no throttling will be applied to this region move.
+
+## RegionServer Splitting Implementation
+
+As write requests are handled by the region server, they accumulate in an in-memory storage system called the _memstore_. Once the memstore fills, its contents are written to disk as additional store files. This event is called a _memstore flush_. As store files accumulate, the RegionServer will [compact](/docs/architecture/regions#compaction) them into fewer, larger files. After each flush or compaction finishes, the amount of data stored in the region has changed. The RegionServer consults the region split policy to determine if the region has grown too large or should be split for another policy-specific reason. A region split request is enqueued if the policy recommends it.
+
+Logically, the process of splitting a region is simple. We find a suitable point in the keyspace of the region where we should divide the region in half, then split the region's data into two new regions at that point. The details of the process, however, are not simple. When a split happens, the newly created _daughter regions_ do not rewrite all the data into new files immediately. Instead, they create small files similar to symbolic link files, named [Reference files](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/Reference.html), which point to either the top or bottom part of the parent store file according to the split point. The reference file is used just like a regular data file, but only half of the records are considered. The region can only be split if there are no more references to the immutable data files of the parent region. Those reference files are cleaned gradually by compactions, so that the region will stop referring to its parent's files, and can be split further.
+
+Although splitting the region is a local decision made by the RegionServer, the split process itself must coordinate with many actors. The RegionServer notifies the Master before and after the split, updates the `.META.` table so that clients can discover the new daughter regions, and rearranges the directory structure and data files in HDFS. Splitting is a multi-task process. To enable rollback in case of an error, the RegionServer keeps an in-memory journal about the execution state. The steps taken by the RegionServer to execute the split are illustrated in the "RegionServer Split Process" schema below. Each step is labeled with its step number. Actions from RegionServers or Master are shown in red, while actions from the clients are shown in green.
+
+
+
+1. The RegionServer decides locally to split the region, and prepares the split. **THE SPLIT TRANSACTION IS STARTED.** As a first step, the RegionServer acquires a shared read lock on the table to prevent schema modifications during the splitting process. Then it creates a znode in zookeeper under `/hbase/region-in-transition/region-name`, and sets the znode's state to `SPLITTING`.
+2. The Master learns about this znode, since it has a watcher for the parent `region-in-transition` znode.
+3. The RegionServer creates a sub-directory named `.splits` under the parent's `region` directory in HDFS.
+4. The RegionServer closes the parent region and marks the region as offline in its local data structures. **THE SPLITTING REGION IS NOW OFFLINE.** At this point, client requests coming to the parent region will throw `NotServingRegionException`. The client will retry with some backoff. The closing region is flushed.
+5. The RegionServer creates region directories under the `.splits` directory, for daughter regions A and B, and creates necessary data structures. Then it splits the store files, in the sense that it creates two Reference files per store file in the parent region. Those reference files will point to the parent region's files.
+6. The RegionServer creates the actual region directory in HDFS, and moves the reference files for each daughter.
+7. The RegionServer sends a `Put` request to the `.META.` table, to set the parent as offline in the `.META.` table and add information about daughter regions. At this point, there won't be individual entries in `.META.` for the daughters. Clients will see that the parent region is split if they scan `.META.`, but won't know about the daughters until they appear in `.META.`. Also, if this `Put` to `.META.` succeeds, the parent will be effectively split. If the RegionServer fails before this RPC succeeds, Master and the next Region Server opening the region will clean dirty state about the region split. After the `.META.` update, though, the region split will be rolled-forward by Master.
+8. The RegionServer opens daughters A and B in parallel.
+9. The RegionServer adds the daughters A and B to `.META.`, together with information that it hosts the regions. **THE SPLIT REGIONS (DAUGHTERS WITH REFERENCES TO PARENT) ARE NOW ONLINE.** After this point, clients can discover the new regions and issue requests to them. Clients cache the `.META.` entries locally, but when they make requests to the RegionServer or `.META.`, their caches will be invalidated, and they will learn about the new regions from `.META.`.
+10. The RegionServer updates znode `/hbase/region-in-transition/region-name` in ZooKeeper to state `SPLIT`, so that the master can learn about it. The balancer can freely re-assign the daughter regions to other region servers if necessary. **THE SPLIT TRANSACTION IS NOW FINISHED.**
+11. After the split, `.META.` and HDFS will still contain references to the parent region. Those references will be removed when compactions in daughter regions rewrite the data files. Garbage collection tasks in the master periodically check whether the daughter regions still refer to the parent region's files. If not, the parent region will be removed.
+
+## Write Ahead Log (WAL)
+
+### Purpose
+
+The _Write Ahead Log (WAL)_ records all changes to data in HBase, to file-based storage. Under normal operations, the WAL is not needed because data changes move from the MemStore to StoreFiles. However, if a RegionServer crashes or becomes unavailable before the MemStore is flushed, the WAL ensures that the changes to the data can be replayed. If writing to the WAL fails, the entire operation to modify the data fails.
+
+HBase uses an implementation of the [WAL](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/wal/WAL.html) interface. Usually, there is only one instance of a WAL per RegionServer. An exception is the RegionServer that is carrying _hbase:meta_; the _meta_ table gets its own dedicated WAL. The RegionServer records Puts and Deletes to its WAL before recording these Mutations to the [MemStore](/docs/architecture/regions#memstore) for the affected [Store](/docs/architecture/regions#store).
+
+
+ Prior to 2.0, the interface for WALs in HBase was named `HLog`. In 0.94, HLog was the name of the
+ implementation of the WAL. You will likely find references to the HLog in documentation tailored
+ to these older versions.
+
+
+The WAL resides in HDFS in the _/hbase/WALs/_ directory, with subdirectories per RegionServer.
+
+For more general information about the concept of write ahead logs, see the Wikipedia [Write-Ahead Log](http://en.wikipedia.org/wiki/Write-ahead_logging) article.
+
+### WAL Providers
+
+In HBase, there are a number of WAL implementations (or 'Providers'). Each is known by a short name label (that unfortunately is not always descriptive). You set the provider in _hbase-site.xml_ passing the WAL provider short-name as the value on the _hbase.wal.provider_ property (Set the provider for _hbase:meta_ using the _hbase.wal.meta_provider_ property, otherwise it uses the same provider configured by _hbase.wal.provider_).
+
+- _asyncfs_: The **default**. New since hbase-2.0.0 (HBASE-15536, HBASE-14790). This _AsyncFSWAL_ provider, as it identifies itself in RegionServer logs, is built on a new non-blocking dfsclient implementation. It is currently resident in the hbase codebase but the intent is to move it back up into HDFS itself. WAL edits are written concurrently ("fan-out" style) to each of the WAL-block replicas on each DataNode rather than in a chained pipeline as the default client does. Latencies should be better. See [Apache HBase Improvements and Practices at Xiaomi](https://www.slideshare.net/HBaseCon/apache-hbase-improvements-and-practices-at-xiaomi) at slide 14 onward for more detail on implementation.
+- _filesystem_: This was the default in hbase-1.x releases. It is built on the blocking _DFSClient_ and writes to replicas in classic _DFSCLient_ pipeline mode. In logs it identifies as _FSHLog_ or _FSHLogProvider_.
+- _multiwal_: This provider is made of multiple instances of _asyncfs_ or _filesystem_. See the next section for more on _multiwal_.
+
+Look for lines like the one below in the RegionServer log to see which provider is in place (the below shows the default AsyncFSWALProvider):
+
+```java
+2018-04-02 13:22:37,983 INFO [regionserver/ve0528:16020] wal.WALFactory: Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
+```
+
+
+  As the *AsyncFSWAL* hacks into the internals of the DFSClient implementation, it can easily be
+  broken by upgrading the hadoop dependencies, even for a simple patch release. So if you do not
+  specify the wal provider explicitly, we will first try to use *asyncfs* and, if that fails, fall
+  back to *filesystem*. Notice that this may not always work, so if you still have problems
+  starting HBase due to a failure to start *AsyncFSWAL*, please specify *filesystem* explicitly
+  in the config file.
+
+
+
+ EC support has been added to hadoop-3.x, and it is incompatible with WAL as the EC output stream
+ does not support hflush/hsync. In order to create a non-EC file in an EC directory, we need to use
+ the new builder-based create API for *FileSystem*, but it is only introduced in hadoop-2.9+ and
+ for HBase we still need to support hadoop-2.7.x. So please do not enable EC for the WAL directory
+ until we find a way to deal with it.
+
+
+### MultiWAL
+
+With a single WAL per RegionServer, the RegionServer must write to the WAL serially, because HDFS files must be sequential. This causes the WAL to be a performance bottleneck.
+
+HBase 1.0 introduced support for MultiWAL in [HBASE-5699](https://issues.apache.org/jira/browse/HBASE-5699). MultiWAL allows a RegionServer to write multiple WAL streams in parallel, by using multiple pipelines in the underlying HDFS instance, which increases total throughput during writes. This parallelization is done by partitioning incoming edits by their Region. Thus, the current implementation will not help with increasing the throughput to a single Region.
+
+RegionServers using the original WAL implementation and those using the MultiWAL implementation can each handle recovery of either set of WALs, so a zero-downtime configuration update is possible through a rolling restart.
+
+#### Configure MultiWAL
+
+To configure MultiWAL for a RegionServer, set the value of the property `hbase.wal.provider` to `multiwal` by pasting in the following XML:
+
+```xml
+<property>
+  <name>hbase.wal.provider</name>
+  <value>multiwal</value>
+</property>
+```
+
+Restart the RegionServer for the changes to take effect.
+
+To disable MultiWAL for a RegionServer, unset the property and restart the RegionServer.
+
+### WAL Flushing
+
+TODO (describe).
+
+### WAL Splitting
+
+A RegionServer serves many regions. All of the regions in a region server share the same active WAL file. Each edit in the WAL file includes information about which region it belongs to. When a region is opened, the edits in the WAL file which belong to that region need to be replayed. Therefore, edits in the WAL file must be grouped by region so that particular sets can be replayed to regenerate the data in a particular region. The process of grouping the WAL edits by region is called _log splitting_. It is a critical process for recovering data if a region server fails.
+
+Log splitting is done by the HMaster during cluster start-up or by the ServerShutdownHandler as a region server shuts down. So that consistency is guaranteed, affected regions are unavailable until data is restored: all WAL edits need to be recovered and replayed before a region affected by log splitting can become available again.
+
+#### Procedure: Log Splitting, Step by Step
+
+
+
+
+
+##### The `/hbase/WALs/HOST,PORT,STARTCODE` directory is renamed
+
+Renaming the directory is important because a RegionServer may still be up and accepting requests even if the HMaster thinks it is down. If the RegionServer does not respond immediately and does not heartbeat its ZooKeeper session, the HMaster may interpret this as a RegionServer failure. Renaming the logs directory ensures that existing, valid WAL files which are still in use by an active but busy RegionServer are not written to by accident.
+
+The new directory is named according to the following pattern:
+
+```text
+/hbase/WALs/HOST,PORT,STARTCODE-splitting
+```
+
+An example of such a renamed directory might look like the following:
+
+```text
+/hbase/WALs/srv.example.com,60020,1254173957298-splitting
+```
+
+
+
+
+
+##### Each log file is split, one at a time
+
+The log splitter reads the log file one edit entry at a time and puts each edit entry into the buffer corresponding to the edit's region. At the same time, the splitter starts several writer threads. Writer threads pick up a corresponding buffer and write the edit entries in the buffer to a temporary recovered edit file. The temporary edit file is stored to disk with the following naming pattern:
+
+```text
+/hbase/TABLE_NAME/REGION_ID/recovered.edits/.temp
+```
+
+This file is used to store all the edits in the WAL log for this region. After log splitting completes, the _.temp_ file is renamed to the sequence ID of the first log written to the file.
+
+To determine whether all edits have been written, the sequence ID is compared to the sequence of the last edit that was written to the HFile. If the sequence of the last edit is greater than or equal to the sequence ID included in the file name, it is clear that all writes from the edit file have been completed.
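+
+A hedged illustration of that comparison, with made-up sequence numbers (the real check happens inside the region server during replay):
+
+```java
+public class RecoveredEditsCheck {
+  public static void main(String[] args) {
+    // Hypothetical values: the sequence id encoded in the recovered-edits file name,
+    // and the highest sequence id already persisted to HFiles for the region.
+    long fileNameSequenceId = 12345L;
+    long lastFlushedSequenceId = 20000L;
+
+    boolean editsAlreadyPersisted = lastFlushedSequenceId >= fileNameSequenceId;
+    System.out.println("can skip recovered edits file: " + editsAlreadyPersisted);
+  }
+}
+```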
+
+
+
+
+
+##### After log splitting is complete, each affected region is assigned to a RegionServer
+
+When the region is opened, the _recovered.edits_ folder is checked for recovered edits files. If any such files are present, they are replayed by reading the edits and saving them to the MemStore. After all edit files are replayed, the contents of the MemStore are written to disk (HFile) and the edit files are deleted.
+
+
+
+
+
+#### Handling of Errors During Log Splitting
+
+If you set the `hbase.hlog.split.skip.errors` option to `true`, errors are treated as follows:
+
+- Any error encountered during splitting will be logged.
+- The problematic WAL log will be moved into the _.corrupt_ directory under the hbase `rootdir`.
+- Processing of the WAL will continue.
+
+If the `hbase.hlog.split.skip.errors` option is set to `false`, the default, the exception will be propagated and the split will be logged as failed. See [HBASE-2958 When hbase.hlog.split.skip.errors is set to false, we fail the split but that's it](https://issues.apache.org/jira/browse/HBASE-2958). We need to do more than just fail split if this flag is set.
+
+#### How EOFExceptions are treated when splitting a crashed RegionServer's WALs
+
+If an EOFException occurs while splitting logs, the split proceeds even when `hbase.hlog.split.skip.errors` is set to `false`. An EOFException while reading the last log in the set of files to split is likely, because the RegionServer was likely in the process of writing a record at the time of a crash. For background, see [HBASE-2643 Figure how to deal with eof splitting logs](https://issues.apache.org/jira/browse/HBASE-2643)
+
+#### Performance Improvements during Log Splitting
+
+WAL log splitting and recovery can be resource intensive and take a long time, depending on the number of RegionServers involved in the crash and the size of the regions. [Distributed log splitting](/docs/architecture/regionserver#enabling-or-disabling-distributed-log-splitting) was developed to improve performance during log splitting.
+
+#### Enabling or Disabling Distributed Log Splitting
+
+Distributed log processing is enabled by default since HBase 0.92. The setting is controlled by the `hbase.master.distributed.log.splitting` property, which can be set to `true` or `false`, but defaults to `true`.
+
+### WAL splitting based on procedureV2
+
+With HBASE-20610, we introduced a new way to coordinate WAL splitting using the procedureV2 framework. This simplifies the WAL splitting process and removes the need to connect to ZooKeeper.
+
+#### Background [!toc]
+
+Previously, WAL splitting was coordinated by ZooKeeper: each region server tried to grab splitting tasks from ZooKeeper, and the burden became heavier as the number of region servers increased.
+
+#### Implementation on Master side [!toc]
+
+During a ServerCrashProcedure, the SplitWALManager will create one SplitWALProcedure for each WAL file which should be split. Each SplitWALProcedure will then spawn a SplitWALRemoteProcedure to send the request to a region server. SplitWALProcedure is a StateMachineProcedure; here is the state transition diagram.
+
+
+
+#### Implementation on Region Server side [!toc]
+
+The Region Server will receive a SplitWALCallable and execute it, which is much more straightforward than before. It returns null on success and an exception if there is any error.
+
+#### Performance [!toc] [#architecture-regionserver-wal-splitting-based-on-procedurev2-performance]
+
+According to tests on a cluster with 5 region servers and 1 master, procedureV2-coordinated WAL splitting performs better than ZK-coordinated WAL splitting, no matter whether it is triggered by restarting the whole cluster or by a single region server crashing.
+
+#### Enable this feature [!toc]
+
+To enable this feature, first ensure that your HBase package already contains this code. If not, please upgrade the HBase cluster's package without any configuration change first. Then change the configuration 'hbase.split.wal.zk.coordinated' to false and rolling upgrade the master with the new configuration. Now WAL splitting is handled by the new implementation. But the region servers are still trying to grab tasks from ZooKeeper; rolling upgrade the region servers with the new configuration to stop that.
+
+- Steps as follows:
+  - Upgrade the whole cluster to get the new implementation.
+  - Upgrade the Master with the new configuration 'hbase.split.wal.zk.coordinated'=false.
+  - Upgrade the region servers to stop them grabbing tasks from zookeeper.
+
+### WAL Compression
+
+The content of the WAL can be compressed using LRU Dictionary compression. This can be used to speed up WAL replication to different datanodes. The dictionary can store up to 2^15 elements; eviction starts after this number is exceeded.
+
+To enable WAL compression, set the `hbase.regionserver.wal.enablecompression` property to `true`. The default value for this property is `false`. By default, WAL tag compression is turned on when WAL compression is enabled. You can turn off WAL tag compression by setting the `hbase.regionserver.wal.tags.enablecompression` property to 'false'.
+
+A possible downside to WAL compression is that we lose more data from the last block in the WAL if it is ill-terminated mid-write. If entries in this last block were added with new dictionary entries but we failed to persist the amended dictionary because of an abrupt termination, a read of this last block may not be able to resolve the last-written entries.
+
+### Durability
+
+It is possible to set _durability_ on each Mutation or on a Table basis. Options include:
+
+- _SKIP_WAL_: Do not write Mutations to the WAL (See the next section, [Disabling the WAL](/docs/architecture/regionserver#disabling-the-wal)).
+- _ASYNC_WAL_: Write the WAL asynchronously; do not hold-up clients waiting on the sync of their write to the filesystem but return immediately. The edit becomes visible. Meanwhile, in the background, the Mutation will be flushed to the WAL at some time later. This option currently may lose data. See HBASE-16689.
+- _SYNC_WAL_: The **default**. Each edit is sync'd to HDFS before we return success to the client.
+- _FSYNC_WAL_: Each edit is fsync'd to HDFS and the filesystem before we return success to the client.
+
+Do not confuse the _ASYNC_WAL_ option on a Mutation or Table with the _AsyncFSWAL_ writer; they are distinct options that are unfortunately closely named.
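+
+As a hedged illustration of the per-Mutation API (not a recommendation to relax durability), the sketch below writes a single `Put` with `ASYNC_WAL`, assuming a hypothetical table `my_table` with family `cf1`; the same `setDurability` call also accepts `SKIP_WAL`, `SYNC_WAL`, or `FSYNC_WAL`:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class PerMutationDurability {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    try (Connection connection = ConnectionFactory.createConnection(conf);
+         Table table = connection.getTable(TableName.valueOf("my_table"))) { // hypothetical table
+      Put put = new Put(Bytes.toBytes("row1"));
+      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q1"), Bytes.toBytes("value"));
+      // Relax durability for this single Mutation only; SYNC_WAL remains the default.
+      put.setDurability(Durability.ASYNC_WAL);
+      table.put(put);
+    }
+  }
+}
+```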
+
+### Custom WAL Directory
+
+HBASE-17437 added support for specifying a WAL directory outside the HBase root directory, or even in a different FileSystem, since 1.3.3/2.0+. Some FileSystems (such as Amazon S3) don't support append or consistent writes; in such scenarios the WAL directory needs to be configured in a different FileSystem to avoid loss of writes.
+
+The following configurations were added to accomplish this:
+
+1. `hbase.wal.dir`
+   This defines where the root WAL directory is located, which could be on a different FileSystem than the root directory. The WAL directory can not be set to a subdirectory of the root directory. The default value is the root directory if unset.
+2. `hbase.rootdir.perms`
+ Configures FileSystem permissions to set on the root directory. This is '700' by default.
+3. `hbase.wal.dir.perms`
+ Configures FileSystem permissions to set on the WAL directory FileSystem. This is '700' by default.
+
+
+  While migrating to a custom WAL dir (outside the HBase root directory or on a different FileSystem),
+  existing WAL files must be copied manually to the new WAL dir, otherwise it may lead to data
+  loss/inconsistency as the HMaster has no information about the previous WAL directory.
+
+
+### Disabling the WAL
+
+It is possible to disable the WAL, to improve performance in certain specific situations. However, disabling the WAL puts your data at risk. The only situation where this is recommended is during a bulk load. This is because, in the event of a problem, the bulk load can be re-run with no risk of data loss.
+
+The WAL is disabled on a per-Mutation basis by setting its durability to skip the WAL. Use the `Mutation.setDurability(Durability.SKIP_WAL)` and `Mutation.getDurability()` methods to set and get this value. There is no way to disable the WAL for only a specific table.
+
+
+ If you disable the WAL for anything other than bulk loads, your data is at risk.
+
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/snapshot-scanner.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/snapshot-scanner.mdx
new file mode 100644
index 000000000000..f80ab118401a
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/snapshot-scanner.mdx
@@ -0,0 +1,112 @@
+---
+title: "Scan Over Snapshot"
+description: "Using TableSnapshotScanner to scan HBase snapshots directly from HDFS, bypassing RegionServers for better performance."
+---
+
+In HBase, a scan of a table costs server-side HBase resources for reading, formatting, and returning data back to the client.
+Luckily, HBase provides a TableSnapshotScanner and TableSnapshotInputFormat (introduced by [HBASE-8369](https://issues.apache.org/jira/browse/HBASE-8369)),
+which can scan HBase-written HFiles directly in the HDFS filesystem, completely bypassing HBase. This access mode
+performs better than going via HBase and can be used with an offline HBase, with in-place or exported
+snapshot HFiles.
+
+To read HFiles directly, the user must have sufficient permissions to access snapshots or in-place hbase HFiles.
+
+## TableSnapshotScanner
+
+TableSnapshotScanner provides a means for running a single client-side scan over snapshot files.
+When using TableSnapshotScanner, we must specify a temporary directory to copy the snapshot files into.
+The client user should have write permissions to this directory, and the dir should not be a subdirectory of
+the hbase.rootdir. The scanner deletes the contents of the directory once the scanner is closed.
+
+### Use TableSnapshotScanner
+
+```java
+Path restoreDir = new Path("XX"); // restore dir should not be a subdirectory of hbase.rootdir
+Scan scan = new Scan();
+try (TableSnapshotScanner scanner = new TableSnapshotScanner(conf, restoreDir, snapshotName, scan)) {
+ Result result = scanner.next();
+ while (result != null) {
+ ...
+ result = scanner.next();
+ }
+}
+```
+
+## TableSnapshotInputFormat
+
+TableSnapshotInputFormat provides a way to scan over snapshot HFiles in a MapReduce job.
+
+### Use TableSnapshotInputFormat
+
+```java
+Job job = new Job(conf);
+Path restoreDir = new Path("XX"); // restore dir should not be a subdirectory of hbase.rootdir
+Scan scan = new Scan();
+TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, MyTableMapper.class, MyMapKeyOutput.class, MyMapOutputValueWritable.class, job, true, restoreDir);
+```
+
+## Permission to access snapshot and data files
+
+Generally, only the HBase owner or the HDFS admin has permission to access HFiles.
+
+[HBASE-18659](https://issues.apache.org/jira/browse/HBASE-18659) uses HDFS ACLs to give HBase-granted users permission to access snapshot files.
+
+### HDFS ACLs
+
+[HDFS ACLs](https://hadoop.apache.org/docs/r2.7.1/hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html#ACLs_Access_Control_Lists) support an "access ACL", which defines the rules to enforce during permission checks, and a "default ACL", which defines the ACL entries that new child files or sub-directories receive automatically during creation. Via HDFS ACLs, HBase syncs the read permissions of granted users to the HFiles.
+
+### Basic idea
+
+The HBase files are organized in the following ways:
+
+- `{hbase-rootdir}/.tmp/data/{namespace}/{table}`
+- `{hbase-rootdir}/data/{namespace}/{table}`
+- `{hbase-rootdir}/archive/data/{namespace}/{table}`
+- `{hbase-rootdir}/.hbase-snapshot/{snapshotName}`
+
+So the basic idea is to add or remove HDFS ACLs on the files of the global/namespace/table directory
+when permission is granted or revoked at the global/namespace/table level.
+
+See the design doc in [HBASE-18659](https://issues.apache.org/jira/browse/HBASE-18659) for more details.
+
+### Configuration to use this feature
+
+- Firstly, make sure that HDFS ACLs are enabled and umask is set to 027
+
+ ```
+ dfs.namenode.acls.enabled = true
+ fs.permissions.umask-mode = 027
+ ```
+
+- Add the master coprocessors; please make sure SnapshotScannerHDFSAclController is configured after AccessController
+
+ ```
+ hbase.coprocessor.master.classes = "org.apache.hadoop.hbase.security.access.AccessController
+ ,org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclController"
+ ```
+
+- Enable this feature
+
+ ```
+ hbase.acl.sync.to.hdfs.enable=true
+ ```
+
+- Modify the table schema to enable this feature for a specific table. This config is false by default
+  for every table, which means the HBase-granted ACLs will not be synced to HDFS
+
+ ```ruby
+ alter 't1', CONFIGURATION => {'hbase.acl.sync.to.hdfs.enable' => 'true'}
+ ```
+
+### Limitation
+
+There are some limitations for this feature:
+
+- If we enable this feature, some master operations such as grant, revoke, snapshot... (See the design doc for more details) will be slower as we need to sync HDFS ACLs to related hfiles.
+- HDFS has a config which limits the max ACL entries num for one directory or file:
+ ```
+ dfs.namenode.acls.max.entries = 32(default value)
+ ```
+  The 32 entries include four fixed users for each directory or file: owner, group, other, and mask. For a directory, the four users contribute 8 ACL entries (access and default); for a file, they contribute 4 ACL entries (access). This means there are 24 ACL entries left for named users or groups.
+  Based on this limitation, we can only sync up to 12 HBase-granted users' ACLs. This means that, if a table enables this feature, the total number of users with table, namespace (of this table), or global READ permission should not be greater than 12.
+- There are some cases that this coprocessor has not handled or could not handle, so the user HDFS ACLs are not synced normally; one example is an HFile that is a reference link to an HFile of another table.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/timeline-consistent-reads.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/timeline-consistent-reads.mdx
new file mode 100644
index 000000000000..a70a28e6d208
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/architecture/timeline-consistent-reads.mdx
@@ -0,0 +1,332 @@
+---
+title: "Timeline-consistent High Available Reads"
+description: "Using region replicas to achieve high availability for reads with timeline consistency, reducing read unavailability during failures."
+---
+
+## Introduction
+
+HBase has, architecturally, always had a strong consistency guarantee. All reads and writes are routed through a single region server, which guarantees that all writes happen in order, and all reads see the most recent committed data.
+
+However, because of this single homing of the reads to a single location, if the server becomes unavailable, the regions of the table that were hosted in the region server become unavailable for some time. There are three phases in the region recovery process - detection, assignment, and recovery. Of these, the detection is usually the longest and is presently in the order of 20-30 seconds depending on the ZooKeeper session timeout. During this time and before the recovery is complete, the clients will not be able to read the region data.
+
+However, for some use cases, either the data may be read-only, or doing reads against some stale data is acceptable. With timeline-consistent high available reads, HBase can be used for these kind of latency-sensitive use cases where the application can expect to have a time bound on the read completion.
+
+For achieving high availability for reads, HBase provides a feature called _region replication_. In this model, for each region of a table, there will be multiple replicas that are opened in different RegionServers. By default, the region replication is set to 1, so only a single region replica is deployed and there will not be any changes from the original model. If region replication is set to 2 or more, then the master will assign replicas of the regions of the table. The Load Balancer ensures that the region replicas are not co-hosted in the same region servers and also in the same rack (if possible).
+
+All of the replicas for a single region will have a unique replica_id, starting from 0. The region replica having replica_id==0 is called the _primary region_, and the others _secondary regions_ or secondaries. Only the primary can accept writes from the client, and the primary will always contain the latest changes. Since all writes still have to go through the primary region, the writes are not highly-available (meaning they might block for some time if the region becomes unavailable).
+
+## Timeline Consistency
+
+With this feature, HBase introduces a Consistency definition, which can be provided per read operation (get or scan).
+
+```java
+public enum Consistency {
+ STRONG,
+ TIMELINE
+}
+```
+
+`Consistency.STRONG` is the default consistency model provided by HBase. If the table has region replication = 1, or if reads in a table with region replicas are done with this consistency, the read is always performed by the primary regions, so there will not be any change from the previous behaviour, and the client always observes the latest data.
+
+In case a read is performed with `Consistency.TIMELINE`, then the read RPC will be sent to the primary region server first. After a short interval (`hbase.client.primaryCallTimeout.get`, 10ms by default), parallel RPCs to the secondary region replicas will also be sent if the primary does not respond back. After this, the result is returned from whichever RPC finished first. If the response came back from the primary region replica, we always know that the data is latest. For this, the `Result.isStale()` API has been added to inspect the staleness. If the result is from a secondary region, `Result.isStale()` will be set to true. The user can then inspect this field to possibly reason about the data.
+
+In terms of semantics, TIMELINE consistency as implemented by HBase differs from pure eventual consistency in these respects:
+
+- Single homed and ordered updates: Region replication or not, on the write side, there is still only 1 defined replica (primary) which can accept writes. This replica is responsible for ordering the edits and preventing conflicts. This guarantees that two different writes are not committed at the same time by different replicas, so the data does not diverge. With this, there is no need to do read-repair or last-timestamp-wins kinds of conflict resolution.
+- The secondaries also apply the edits in the order that the primary committed them. This way the secondaries will contain a snapshot of the primary's data at any point in time. This is similar to RDBMS replication, and even to HBase's own multi-datacenter replication, but within a single cluster.
+- On the read side, the client can detect whether the read is coming from up-to-date data or is stale data. Also, the client can issue reads with different consistency requirements on a per-operation basis to ensure its own semantic guarantees.
+- The client can still observe edits out-of-order, and can go back in time, if it observes reads from one secondary replica first, then another secondary replica. There is no stickiness to region replicas or a transaction-id based guarantee. If required, this can be implemented later though.
+
+**Timeline Consistency**
+
+
+To better understand the TIMELINE semantics, let's look at the above diagram. Let's say that there are two clients, and the first one writes x=1 at first, then x=2 and x=3 later. As above, all writes are handled by the primary region replica. The writes are saved in the write ahead log (WAL), and replicated to the other replicas asynchronously. In the above diagram, notice that replica_id=1 received 2 updates, and its data shows that x=2, while the replica_id=2 only received a single update, and its data shows that x=1.
+
+If client1 reads with STRONG consistency, it will only talk with the replica_id=0, and thus is guaranteed to observe the latest value of x=3. In case of a client issuing TIMELINE consistency reads, the RPC will go to all replicas (after primary timeout) and the result from the first response will be returned back. Thus the client can see either 1, 2 or 3 as the value of x. Let's say that the primary region has failed and log replication cannot continue for some time. If the client does multiple reads with TIMELINE consistency, she can observe x=2 first, then x=1, and so on.
+
+## Tradeoffs
+
+Having secondary regions hosted for read availability comes with some tradeoffs which should be carefully evaluated per use case. Following are advantages and disadvantages.
+
+**Advantages:**
+
+- High availability for read-only tables
+- High availability for stale reads
+- Ability to do very low latency reads with very high percentile (99.9%+) latencies for stale reads
+
+**Disadvantages:**
+
+- Double / Triple MemStore usage (depending on region replication count) for tables with region replication > 1
+- Increased block cache usage
+- Extra network traffic for log replication
+- Extra backup RPCs for replicas
+
+To serve the region data from multiple replicas, HBase opens the regions in secondary mode in the region servers. The regions opened in secondary mode share the same data files with the primary region replica; however, each secondary region replica has its own MemStore to keep the unflushed data (only the primary region can do flushes). Also, to serve reads from secondary regions, the blocks of data files may also be cached in the block caches for the secondary regions.
+
+## Where is the code
+
+This feature is delivered in two phases, Phase 1 and 2. The first phase was completed in time for the HBase-1.0.0 release, meaning that with HBase-1.0.x you can use all the features that are marked for Phase 1. Phase 2 was committed in HBase-1.1.0, meaning all HBase versions after 1.1.0 should contain Phase 2 items.
+
+## Propagating writes to region replicas
+
+As discussed above writes only go to the primary region replica. For propagating the writes from the primary region replica to the secondaries, there are two different mechanisms. For read-only tables, you do not need to use any of the following methods. Disabling and enabling the table should make the data available in all region replicas. For mutable tables, you have to use **only** one of the following mechanisms: storefile refresher, or async wal replication. The latter is recommended.
+
+### StoreFile Refresher
+
+The first mechanism is the store file refresher, which was introduced in HBase-1.0+. The store file refresher is a thread per region server, which runs periodically and does a refresh operation for the store files of the primary region for the secondary region replicas. If enabled, the refresher will ensure that the secondary region replicas see new flushed, compacted or bulk loaded files from the primary region in a timely manner. However, this means that only flushed data can be read back from the secondary region replicas, and only after the refresher has run, making the secondaries lag behind the primary for a longer time.
+
+For turning this feature on, you should configure `hbase.regionserver.storefile.refresh.period` to a non-zero value. See Configuration section below.
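+
+For illustration, a minimal server-side `hbase-site.xml` sketch that turns the refresher on with a 30-second period (the value is only an example; pick one appropriate for your NameNode load and HFile TTL):
+
+```xml
+<property>
+  <name>hbase.regionserver.storefile.refresh.period</name>
+  <value>30000</value>
+</property>
+```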
+
+### Async WAL replication
+
+The second mechanism for propagation of writes to secondaries is done via the “Async WAL Replication” feature. It is only available in HBase-1.1+. This works similarly to HBase's multi-datacenter replication, but instead the data from a region is replicated to the secondary regions. Each secondary replica always receives and observes the writes in the same order that the primary region committed them. In some sense, this design can be thought of as “in-cluster replication”, where instead of replicating to a different datacenter, the data goes to secondary regions to keep secondary region's in-memory state up to date. The data files are shared between the primary region and the other replicas, so that there is no extra storage overhead. However, the secondary regions will have recent non-flushed data in their memstores, which increases the memory overhead. The primary region writes flush, compaction, and bulk load events to its WAL as well, which are also replicated through wal replication to secondaries. When they observe the flush/compaction or bulk load event, the secondary regions replay the event to pick up the new files and drop the old ones.
+
+Committing writes in the same order as in primary ensures that the secondaries won't diverge from the primary regions data, but since the log replication is asynchronous, the data might still be stale in secondary regions.
+
+Async WAL Replication is **disabled** by default. You can enable this feature by setting `hbase.region.replica.replication.enabled` to `true`.
+
+Before 3.0.0, this feature worked as a replication endpoint, so the performance and latency characteristics are expected to be similar to inter-cluster replication. Once enabled, it will create a replication peer named `region_replica_replication` when you create a table with region replication \> 1 for the first time.
+
+If you want to disable this feature, you need to perform two actions in the following order:
+
+- Set the configuration property `hbase.region.replica.replication.enabled` to `false` in `hbase-site.xml` (see the Configuration section below)
+- Disable the replication peer named `region_replica_replication` in the cluster using the hbase shell or the `Admin` class:
+
+```ruby
+hbase> disable_peer 'region_replica_replication'
+```
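+
+The equivalent operation through the Java `Admin` API might look like the following sketch (assuming a configured connection; the class name is illustrative):
+
+```java
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+
+public class DisableRegionReplicaReplication {
+  public static void main(String[] args) throws Exception {
+    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = connection.getAdmin()) {
+      // Disable the replication peer that pre-3.0.0 region replica replication created.
+      admin.disableReplicationPeer("region_replica_replication");
+    }
+  }
+}
+```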
+
+In 3.0.0, this feature was re-implemented to decouple it from the general replication framework. Now we do not need to create a special replication peer, and during a rolling upgrade we will remove this replication peer automatically if it is present. See [HBASE-26233](https://issues.apache.org/jira/browse/HBASE-26233) and the design doc in our git repo for more details.
+
+Async WAL Replication and the `hbase:meta` table is a little more involved and gets its own section below; see [Region replication for META table's region](/docs/architecture/timeline-consistent-reads#region-replication-for-meta-tables-region)
+
+## Store File TTL
+
+In both of the write propagation approaches mentioned above, store files of the primary will be opened in secondaries independent of the primary region. So for files that the primary compacted away, the secondaries might still be referring to these files for reading. Both features are using HFileLinks to refer to files, but there is no protection (yet) for guaranteeing that the file will not be deleted prematurely. Thus, as a guard, you should set the configuration property `hbase.master.hfilecleaner.ttl` to a larger value, such as 1 hour to guarantee that you will not receive IOExceptions for requests going to replicas.
+
+## Region replication for META table's region
+
+The general Async WAL Replication does not work for the META table's WAL. The META table's secondary replicas instead refresh themselves from the persistent store files every `hbase.regionserver.meta.storefile.refresh.period` (which must be a non-zero value). Note that the META replication period is distinct from the user-space `hbase.regionserver.storefile.refresh.period` value.
+
+### Async WAL Replication for META table as of hbase-2.4.0+
+
+Async WAL replication for META is added as a new feature in 2.4.0. Set `hbase.region.replica.replication.catalog.enabled` to enable async WAL Replication for META region replicas. It is off by default.
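+
+A minimal server-side `hbase-site.xml` sketch for turning this on:
+
+```xml
+<property>
+  <name>hbase.region.replica.replication.catalog.enabled</name>
+  <value>true</value>
+</property>
+```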
+
+Regarding the META replica count: up to hbase-2.4.0, you would set the special property `hbase.meta.replica.count`. Now you can alter the META table as you would a user-space table (if `hbase.meta.replica.count` is set, it takes precedence over what is set for the replica count in the META table, updating the META replica count to match).
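+
+For example, on a 2.4+ cluster the META replica count could be adjusted by altering the catalog table from the shell (a sketch; the count of 3 is illustrative):
+
+```ruby
+hbase> alter 'hbase:meta', {REGION_REPLICATION => 3}
+```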
+
+### Async WAL Replication for META table as of hbase-3.0.0+
+
+In [HBASE-26233](https://issues.apache.org/jira/browse/HBASE-26233) we re-implemented the region replication framework so that it does not rely on the general replication framework, which lets it work for the META table as well. The code described in the above section has mostly been removed, but the config `hbase.region.replica.replication.catalog.enabled` is still kept; you can still use it to control whether to enable async WAL replication for the META table. The ability to alter the META table is also kept.
+
+### Load Balancing META table load
+
+hbase-2.4.0 also adds a new client-side `LoadBalance` mode. When enabled on the client side, clients will try to read META replicas first before falling back on the primary. Before this, the replica lookup mode (now named `HedgedRead` in hbase-2.4.0) had clients read the primary and, if no response arrived after a configurable amount of time, start reads against the replicas. Starting from hbase-2.4.12 (and all higher minor versions), with client-side `LoadBalance` mode, clients load balance META scan requests across all META replica regions, including the primary META region. In case of exceptions such as NotServingRegionException, they fall back on the primary META region.
+
+The new `LoadBalance` mode helps alleviate hotspotting on the META table by distributing the META read load.
+
+To enable the meta replica locator's load balance mode, set `hbase.locator.meta.replicas.mode` to "LoadBalance" on the **client side** (only). Valid options for this configuration are `None`, `HedgedRead`, and `LoadBalance`. Option parsing is case insensitive. The default mode is `None` (which falls through to `HedgedRead`, the current default). Do NOT put this configuration in any server-side configuration, Master or RegionServer (the Master could make decisions based off stale state, which is to be avoided).
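+
+A minimal client-side `hbase-site.xml` sketch enabling the load balance mode:
+
+```xml
+<property>
+  <name>hbase.locator.meta.replicas.mode</name>
+  <value>LoadBalance</value>
+</property>
+```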
+
+## Memory accounting
+
+The secondary region replicas refer to the data files of the primary region replica, but they have their own memstores (in HBase-1.1+) and use the block cache as well. However, one distinction is that the secondary region replicas cannot flush data when there is memory pressure on their memstores. They can only free up memstore memory when the primary region does a flush and the flush is replicated to the secondary. Since a region server may host primary replicas for some regions and secondaries for others, the secondaries might cause extra flushes to the primary regions in the same host. In extreme situations, there can be no memory left for adding new writes coming from the primary via WAL replication. For unblocking this situation (and since the secondary cannot flush by itself), the secondary is allowed to do a “store file refresh” by doing a file system list operation to pick up new files from the primary, and possibly dropping its memstore. This refresh will only be performed if the memstore size of the biggest secondary region replica is at least `hbase.region.replica.storefile.refresh.memstore.multiplier` (default 4) times bigger than the biggest memstore of a primary replica. One caveat is that if this is performed, the secondary can observe partial row updates across column families (since column families are flushed independently). The default should be good enough to avoid doing this operation frequently. You can set this value to a large number to disable this feature if desired, but be warned that it might cause the replication to block forever.
+
+## Secondary replica failover
+
+When a secondary region replica first comes online, or fails over, it may have served some edits from its memstore. Since the recovery is handled differently for secondary replicas, the secondary has to ensure that it does not go back in time before it starts serving requests after assignment. For doing that, the secondary waits until it observes a full flush cycle (start flush, commit flush) or a “region open event” replicated from the primary. Until this happens, the secondary region replica will reject all read requests by throwing an IOException with message “The region's reads are disabled”. However, the other replicas will probably still be available to read, thus not causing any impact for the rpc with TIMELINE consistency. To facilitate faster recovery, the secondary region will trigger a flush request from the primary when it is opened. The configuration property `hbase.region.replica.wait.for.primary.flush` (enabled by default) can be used to disable this feature if needed.
+
+## Configuration properties
+
+To use highly available reads, you should set the following properties in the `hbase-site.xml` file. There is no specific configuration to enable or disable region replicas. Instead, you can increase or decrease the number of region replicas per table at table creation time or with alter table. The following configuration is for using async WAL replication and a META replica count of 3.
+
+### Server side properties
+
+```xml
+<property>
+  <name>hbase.regionserver.storefile.refresh.period</name>
+  <value>0</value>
+  <description>
+    The period (in milliseconds) for refreshing the store files for the secondary regions. 0 means this feature is disabled. Secondary regions see new files (from flushes and compactions) from the primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than the HFile TTL (hbase.master.hfilecleaner.ttl), the requests are rejected. Configuring HFile TTL to a larger value is also recommended with this setting.
+  </description>
+</property>
+<property>
+  <name>hbase.regionserver.meta.storefile.refresh.period</name>
+  <value>300000</value>
+  <description>
+    The period (in milliseconds) for refreshing the store files for the hbase:meta table's secondary regions. 0 means this feature is disabled. Secondary regions see new files (from flushes and compactions) from the primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than the HFile TTL (hbase.master.hfilecleaner.ttl), the requests are rejected. Configuring HFile TTL to a larger value is also recommended with this setting. This should be a non-zero number if meta replicas are enabled.
+  </description>
+</property>
+<property>
+  <name>hbase.region.replica.replication.enabled</name>
+  <value>true</value>
+  <description>
+    Whether asynchronous WAL replication to the secondary region replicas is enabled or not. If this is enabled, a replication peer named "region_replica_replication" will be created which will tail the logs and replicate the mutations to region replicas for tables that have region replication > 1. If this is enabled once, disabling this replication also requires disabling the replication peer using the shell or the Admin java class. Replication to secondary region replicas works over standard inter-cluster replication.
+  </description>
+</property>
+<property>
+  <name>hbase.master.hfilecleaner.ttl</name>
+  <value>3600000</value>
+  <description>
+    The period (in milliseconds) to keep store files in the archive folder before deleting them from the file system.
+  </description>
+</property>
+<property>
+  <name>hbase.region.replica.storefile.refresh.memstore.multiplier</name>
+  <value>4</value>
+  <description>
+    The multiplier for a "store file refresh" operation for the secondary region replica. If a region server has memory pressure, the secondary region will refresh its store files if the memstore size of the biggest secondary replica is this many times bigger than the memstore size of the biggest primary replica. Set this to a very big value to disable this feature (not recommended).
+  </description>
+</property>
+<property>
+  <name>hbase.region.replica.wait.for.primary.flush</name>
+  <value>true</value>
+  <description>
+    Whether to wait for observing a full flush cycle from the primary before starting to serve data in a secondary. Disabling this might cause the secondary region replicas to go back in time for reads between region movements. Please note that if you set the per-table property REGION_MEMSTORE_REPLICATION to false, hbase.region.replica.wait.for.primary.flush will be ignored.
+  </description>
+</property>
+```
+
+One thing to also keep in mind is that the region replica placement policy is only enforced by the `StochasticLoadBalancer`, which is the default balancer. If you are using a custom load balancer (the `hbase.master.loadbalancer.class` property in hbase-site.xml), replicas of a region might end up being hosted on the same server.
+
+### Client side properties
+
+Be sure to set the following for all clients (and servers) that will use region replicas.
+
+```xml
+<property>
+  <name>hbase.ipc.client.specificThreadForWriting</name>
+  <value>true</value>
+  <description>
+    Whether to enable interruption of RPC threads at the client side. This is required for region replicas with fallback RPC's to secondary regions.
+  </description>
+</property>
+<property>
+  <name>hbase.client.primaryCallTimeout.get</name>
+  <value>10000</value>
+  <description>
+    The timeout (in microseconds), before secondary fallback RPC's are submitted for get requests with Consistency.TIMELINE to the secondary replicas of the regions. Defaults to 10ms. Setting this lower will increase the number of RPC's, but will lower the p99 latencies.
+  </description>
+</property>
+<property>
+  <name>hbase.client.primaryCallTimeout.multiget</name>
+  <value>10000</value>
+  <description>
+    The timeout (in microseconds), before secondary fallback RPC's are submitted for multi-get requests (Table.get(List)) with Consistency.TIMELINE to the secondary replicas of the regions. Defaults to 10ms. Setting this lower will increase the number of RPC's, but will lower the p99 latencies.
+  </description>
+</property>
+<property>
+  <name>hbase.client.replicaCallTimeout.scan</name>
+  <value>1000000</value>
+  <description>
+    The timeout (in microseconds), before secondary fallback RPC's are submitted for scan requests with Consistency.TIMELINE to the secondary replicas of the regions. Defaults to 1 sec. Setting this lower will increase the number of RPC's, but will lower the p99 latencies.
+  </description>
+</property>
+<property>
+  <name>hbase.meta.replicas.use</name>
+  <value>true</value>
+  <description>
+    Whether to use meta table replicas or not. Default is false.
+  </description>
+</property>
+```
+
+Note HBase-1.0.x users should use `hbase.ipc.client.allowsInterrupt` rather than `hbase.ipc.client.specificThreadForWriting`.
+
+## User Interface
+
+In the Master's user interface, the region replicas of a table are also shown together with the primary regions. You can notice that the replicas of a region will share the same start and end keys and the same region name prefix. The only differences are the appended replica_id (which is encoded as hex) and the region encoded name. You can also see the replica ids shown explicitly in the UI.
+
+## Creating a table with region replication
+
+Region replication is a per-table property. All tables have `REGION_REPLICATION = 1` by default, which means that there is only one replica per region. You can set and change the number of replicas per region of a table by supplying the `REGION_REPLICATION` property in the table descriptor.
+
+There is another per-table property, `REGION_MEMSTORE_REPLICATION`. All tables have `REGION_MEMSTORE_REPLICATION = true` by default, which means that new data written to the primary region should be replicated. If you set this to `false`, replicas do not receive memstore updates from the primary RegionServer; they will only receive updates for events like flushes and bulkloads, and will not have access to data which the primary has not yet flushed. Please note that if you set `REGION_MEMSTORE_REPLICATION` to false, `hbase.region.replica.wait.for.primary.flush` will be ignored.
+
+### Shell
+
+```ruby
+create 't1', 'f1', {REGION_REPLICATION => 2}
+
+describe 't1'
+for i in 1..100
+  put 't1', "r#{i}", 'f1:c1', i
+end
+flush 't1'
+```
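+
+If you also want to turn off memstore replication for the replicas (see `REGION_MEMSTORE_REPLICATION` above), a hedged shell sketch:
+
+```ruby
+create 't2', 'f1', {REGION_REPLICATION => 2, REGION_MEMSTORE_REPLICATION => false}
+```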
+
+### Java [#architecture-timeline-consistent-reads-creating-table-with-region-replication-java]
+
+```java
+HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("test_table"));
+htd.setRegionReplication(2);
+...
+admin.createTable(htd);
+```
+
+You can also use `setRegionReplication()` and alter table to increase or decrease the region replication for a table.
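+
+With the 2.x API, altering the replica count of an existing table might look like the following sketch (the table name and count are illustrative; depending on your version, the table may need to be disabled while the replication count is changed):
+
+```java
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+
+public class AlterRegionReplication {
+  public static void main(String[] args) throws Exception {
+    TableName table = TableName.valueOf("test_table");
+    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
+         Admin admin = connection.getAdmin()) {
+      // Rebuild the table descriptor with the new replica count and apply it.
+      TableDescriptor updated = TableDescriptorBuilder
+          .newBuilder(admin.getDescriptor(table))
+          .setRegionReplication(3)
+          .build();
+      admin.disableTable(table);
+      admin.modifyTable(updated);
+      admin.enableTable(table);
+    }
+  }
+}
+```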
+
+## Read API and Usage
+
+### Shell
+
+You can do reads in the shell using the Consistency.TIMELINE semantics as follows:
+
+```ruby
+hbase(main):001:0> get 't1','r6', {CONSISTENCY => "TIMELINE"}
+```
+
+You can simulate a region server pausing or becoming unavailable and do a read from the secondary replica:
+
+```bash
+$ kill -STOP <pid of the region server>
+
+hbase(main):001:0> get 't1','r6', {CONSISTENCY => "TIMELINE"}
+```
+
+Using scans is also similar.
+
+```ruby
+hbase> scan 't1', {CONSISTENCY => 'TIMELINE'}
+```
+
+### Java
+
+You can set the consistency for Gets and Scans and do requests as follows.
+
+```java
+Get get = new Get(row);
+get.setConsistency(Consistency.TIMELINE);
+...
+Result result = table.get(get);
+```
+
+You can also pass multiple gets:
+
+```java
+Get get1 = new Get(row);
+get1.setConsistency(Consistency.TIMELINE);
+...
+ArrayList<Get> gets = new ArrayList<>();
+gets.add(get1);
+...
+Result[] results = table.get(gets);
+```
+
+And Scans:
+
+```java
+Scan scan = new Scan();
+scan.setConsistency(Consistency.TIMELINE);
+...
+ResultScanner scanner = table.getScanner(scan);
+```
+
+You can inspect whether the results are coming from primary region or not by calling the `Result.isStale()` method:
+
+```java
+Result result = table.get(get);
+if (result.isStale()) {
+ ...
+}
+```
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/asf.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/asf.mdx
new file mode 100644
index 000000000000..fa88e1d4e2dd
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/asf.mdx
@@ -0,0 +1,12 @@
+---
+title: "HBase and the Apache Software Foundation"
+description: "HBase is a project in the Apache Software Foundation and as such there are responsibilities to the ASF to ensure a healthy project."
+---
+
+## ASF Development Process
+
+See the [Apache Development Process page](https://www.apache.org/dev/#committers) for all sorts of information on how the ASF is structured (e.g., PMC, committers, contributors), tips on contributing and getting involved, and how open source works at the ASF.
+
+## ASF Board Reporting
+
+Once a quarter, each project in the ASF portfolio submits a report to the ASF board. This is done by the HBase project lead and the committers. See [ASF board reporting](https://www.apache.org/foundation/board/reporting) for more information.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/additional-topics.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/additional-topics.mdx
new file mode 100644
index 000000000000..9f23d553474e
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/additional-topics.mdx
@@ -0,0 +1,301 @@
+---
+title: "Additional Topics"
+description: "Configuration keys, security considerations, and best practices for HBase backup and restore operations."
+---
+
+## Configuration keys
+
+The backup and restore feature includes both required and optional configuration keys.
+
+### Required properties
+
+**_hbase.backup.enable_**: Controls whether or not the feature is enabled (Default: `false`). Set this value to `true`.
+
+**_hbase.master.logcleaner.plugins_**: A comma-separated list of classes invoked when cleaning logs in the HBase Master. Set
+this value to `org.apache.hadoop.hbase.backup.master.BackupLogCleaner` or append it to the current value.
+
+**_hbase.procedure.master.classes_**: A comma-separated list of classes invoked with the Procedure framework in the Master. Set
+this value to `org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager` or append it to the current value.
+
+**_hbase.procedure.regionserver.classes_**: A comma-separated list of classes invoked with the Procedure framework in the RegionServer.
+Set this value to `org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager` or append it to the current value.
+
+**_hbase.coprocessor.region.classes_**: A comma-separated list of RegionObservers deployed on tables. Set this value to
+`org.apache.hadoop.hbase.backup.BackupObserver` or append it to the current value.
+
+**_hbase.coprocessor.master.classes_**: A comma-separated list of MasterObservers deployed on tables. Set this value to
+`org.apache.hadoop.hbase.backup.BackupMasterObserver` or append it to the current value.
+
+**_hbase.master.hfilecleaner.plugins_**: A comma-separated list of HFileCleaners deployed on the Master. Set this value
+to `org.apache.hadoop.hbase.backup.BackupHFileCleaner` or append it to the current value.
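+
+Putting the required keys together, a minimal `hbase-site.xml` sketch might look like the following (if any of these properties already carry values on your cluster, append the backup classes rather than replacing them):
+
+```xml
+<property>
+  <name>hbase.backup.enable</name>
+  <value>true</value>
+</property>
+<property>
+  <name>hbase.master.logcleaner.plugins</name>
+  <value>org.apache.hadoop.hbase.backup.master.BackupLogCleaner</value>
+</property>
+<property>
+  <name>hbase.procedure.master.classes</name>
+  <value>org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager</value>
+</property>
+<property>
+  <name>hbase.procedure.regionserver.classes</name>
+  <value>org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager</value>
+</property>
+<property>
+  <name>hbase.coprocessor.region.classes</name>
+  <value>org.apache.hadoop.hbase.backup.BackupObserver</value>
+</property>
+<property>
+  <name>hbase.coprocessor.master.classes</name>
+  <value>org.apache.hadoop.hbase.backup.BackupMasterObserver</value>
+</property>
+<property>
+  <name>hbase.master.hfilecleaner.plugins</name>
+  <value>org.apache.hadoop.hbase.backup.BackupHFileCleaner</value>
+</property>
+```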
+
+### Optional properties
+
+**_hbase.backup.system.ttl_**: The time-to-live in seconds of data in the `hbase:backup` tables (default: forever). This property
+is only relevant prior to the creation of the `hbase:backup` table. Use the `alter` command in the HBase shell to modify the TTL
+when this table already exists. See the [below section](/docs/backup-restore/additional-topics#a-warning-on-file-system-growth) for more details on the impact of this
+configuration property.
+
+**_hbase.backup.attempts.max_**: The number of attempts to perform when taking hbase table snapshots (default: 10).
+
+**_hbase.backup.attempts.pause.ms_**: The amount of time to wait between failed snapshot attempts in milliseconds (default: 10000).
+
+**_hbase.backup.logroll.timeout.millis_**: The amount of time (in milliseconds) to wait for RegionServers to execute a WAL rolling
+in the Master's procedure framework (default: 30000).
+
+## Best Practices
+
+### Formulate a restore strategy and test it.
+
+Before you rely on a backup and restore strategy for your production environment, identify how backups must be performed,
+and more importantly, how restores must be performed. Test the plan to ensure that it is workable.
+At a minimum, store backup data from a production cluster on a different cluster or server. To further safeguard the data,
+use a backup location that is at a different physical location.
+
+If you have an unrecoverable loss of data on your primary production cluster as a result of computer system issues, you may
+be able to restore the data from a different cluster or server at the same site. However, a disaster that destroys the whole
+site renders locally stored backups useless. Consider storing the backup data and necessary resources (both computing capacity
+and operator expertise) to restore the data at a site sufficiently remote from the production site. In the case of a catastrophe
+at the whole primary site (fire, earthquake, etc.), the remote backup site can be very valuable.
+
+### Secure a full backup image first.
+
+As a baseline, you must complete a full backup of HBase data at least once before you can rely on incremental backups. The full
+backup should be stored outside of the source cluster. To ensure complete dataset recovery, you must run the restore utility
+with the option to restore baseline full backup. The full backup is the foundation of your dataset. Incremental backup data
+is applied on top of the full backup during the restore operation to return you to the point in time when backup was last taken.
+
+### Define and use backup sets for groups of tables that are logical subsets of the entire dataset.
+
+You can group tables into an object called a backup set. A backup set can save time when you have a particular group of tables
+that you expect to repeatedly back up or restore.
+
+When you create a backup set, you type table names to include in the group. The backup set includes not only groups of related
+tables, but also retains the HBase backup metadata. Afterwards, you can invoke the backup set name to indicate what tables apply
+to the command execution instead of entering all the table names individually.
+
+### Document the backup and restore strategy, and ideally log information about each backup.
+
+Document the whole process so that the knowledge base can transfer to new administrators after employee turnover. As an extra
+safety precaution, also log the calendar date, time, and other relevant details about the data of each backup. This metadata
+can potentially help locate a particular dataset in case of source cluster failure or primary site disaster. Maintain duplicate
+copies of all documentation: one copy at the production cluster site and another at the backup location or wherever it can be
+accessed by an administrator remotely from the production cluster.
+
+## Scenario: Safeguarding Application Datasets on Amazon S3
+
+This scenario describes how a hypothetical retail business uses backups to safeguard application data and then restore the dataset
+after failure.
+
+The HBase administration team uses backup sets to store data from a group of tables that have interrelated information for an
+application called green. In this example, one table contains transaction records and the other contains customer details. The
+two tables need to be backed up and be recoverable as a group.
+
+The admin team also wants to ensure daily backups occur automatically.
+
+
+
+The following is an outline of the steps and examples of commands that are used to backup the data for the _green_ application and
+to recover the data later. All commands are run when logged in as HBase superuser.
+
+- A backup set called _green_set_ is created as an alias for both the transactions table and the customer table. The backup set can
+ be used for all operations to avoid typing each table name. The backup set name is case-sensitive and should be formed with only
+ printable characters and without spaces.
+
+ ```bash
+ $ hbase backup set add green_set transactions
+ $ hbase backup set add green_set customer
+ ```
+
+- The first backup of green_set data must be a full backup. The following command example shows how credentials are passed to Amazon
+ S3 and specifies the file system with the s3a: prefix.
+
+ ```bash
+ $ ACCESS_KEY=ABCDEFGHIJKLMNOPQRST
+ $ SECRET_KEY=123456789abcdefghijklmnopqrstuvwxyzABCD
+ $ sudo -u hbase hbase backup create full \
+ s3a://$ACCESS_KEY:$SECRET_KEY@prodhbasebackups/backups -s green_set
+ ```
+
+- Incremental backups should be run according to a schedule that ensures essential data recovery in the event of a catastrophe. At
+ this retail company, the HBase admin team decides that automated daily backups secures the data sufficiently. The team decides that
+ they can implement this by modifying an existing Cron job that is defined in `/etc/crontab`. Consequently, IT modifies the Cron job
+ by adding the following line:
+
+ ```bash
+ @daily hbase hbase backup create incremental s3a://$ACCESS_KEY:$SECRET_KEY@prodhbasebackups/backups -s green_set
+ ```
+
+- A catastrophic IT incident disables the production cluster that the green application uses. An HBase system administrator of the
+ backup cluster must restore the _green_set_ dataset to the point in time closest to the recovery objective.
+
+
+ If the administrator of the backup HBase cluster has the backup ID with relevant details in accessible records, the following
+ search with the `hdfs dfs -ls` command and manually scanning the backup ID list can be bypassed. Consider continuously maintaining
+ and protecting a detailed log of backup IDs outside the production cluster in your environment.
+
+
+ The HBase administrator runs the following command on the directory where backups are stored to print the list of successful backup
+ IDs on the console:
+
+ ```bash
+ hdfs dfs -ls -t /prodhbasebackups/backups
+ ```
+
+- The admin scans the list to see which backup was created at a date and time closest to the recovery objective. To do this, the
+ admin converts the calendar timestamp of the recovery point in time to Unix time because backup IDs are uniquely identified with
+ Unix time. The backup IDs are listed in reverse chronological order, meaning the most recent successful backup appears first.
+
+ The admin notices that the following line in the command output corresponds with the _green_set_ backup that needs to be restored:
+
+ ```bash
+ /prodhbasebackups/backups/backup_1467823988425
+ ```
+
+- The admin restores green_set by invoking the backup ID and the -overwrite option. The -overwrite option truncates all existing data
+ in the destination and populates the tables with data from the backup dataset. Without this flag, the backup data is appended to the
+ existing data in the destination. In this case, the admin decides to overwrite the data because it is corrupted.
+
+ ```bash
+ $ sudo -u hbase hbase restore -s green_set \
+ s3a://$ACCESS_KEY:$SECRET_KEY@prodhbasebackups/backups backup_1467823988425 -overwrite
+ ```
+
+## Security of Backup Data
+
+Because this feature copies data to remote locations, it is important to take a moment to clearly state the procedural
+concerns that exist around data security. Like the HBase replication feature, backup and restore provides the constructs to automatically
+copy data from within a corporate boundary to some system outside of that boundary. When storing sensitive data, it is imperative that the
+locations to which data is sent by backup and restore (much like any feature which extracts data from HBase) have undergone a security audit
+to ensure that only authenticated users are allowed to access that data.
+
+For example, with the above example of backing up data to S3, it is of the utmost importance that the proper permissions are assigned
+to the S3 bucket to ensure that only a minimum set of authorized users are allowed to access this data. Because the data is no longer
+being accessed via HBase, and its authentication and authorization controls, we must ensure that the filesystem storing that data is
+providing a comparable level of security. This is a manual step which users **must** implement on their own.
+
+## Technical Details of Incremental Backup and Restore
+
+HBase incremental backups enable more efficient capture of HBase table images than previous attempts
+at serial backup and restore solutions, such as those that only used HBase Export and Import APIs.
+Incremental backups use Write Ahead Logs (WALs) to capture the data changes since the
+previous backup was created. A WAL roll (create new WALs) is executed across all RegionServers
+to track the WALs that need to be in the backup.
+In addition to WALs, incremental backups also track bulk-loaded HFiles for tables under backup.
+
+Incremental backup gathers all WAL files generated since the last backup from the source cluster,
+converts them to HFiles in a `.tmp` directory under the `BACKUP_ROOT`, and then moves these
+HFiles to their final location under the backup root directory to form the backup image.
+It also reads bulk load records from the backup system table, forms the paths for the corresponding
+bulk-loaded HFiles, and copies those files to the backup destination.
+Bulk-loaded files are preserved (not deleted by cleaner chores) until they've been included in a
+backup (for each backup root).
+A process similar to the DistCp (distributed copy) tool is used to move the backup files to the
+target file system.
+
+When a table restore operation starts, a two-step process is initiated.
+First, the full backup is restored from the full backup image.
+Second, all HFiles from incremental backups between the last full backup and the incremental backup
+being restored (including bulk-loaded HFiles) are bulk loaded into the table using the
+HBase Bulk Load utility.
+
+You can only restore on a live HBase cluster because the data must be redistributed to complete the restore operation successfully.
+
+## A Warning on File System Growth
+
+As a reminder, incremental backups are implemented via retaining the write-ahead logs which HBase primarily uses for data durability.
+Thus, to ensure that all data needing to be included in a backup is still available in the system, the HBase backup and restore feature
+retains all write-ahead logs since the last backup until the next incremental backup is executed.
+
+Like HBase Snapshots, this can have an expectedly large impact on the HDFS usage of HBase for high volume tables. Take care in enabling
+and using the backup and restore feature, specifically with a mind to removing backup sessions when they are not actively being used.
+
+The only automated, upper-bound on retained write-ahead logs for backup and restore is based on the TTL of the `hbase:backup` system table which,
+as of the time this document is written, is infinite (backup table entries are never automatically deleted). This requires that administrators
+perform backups on a schedule whose frequency is relative to the amount of available space on HDFS (e.g. less available HDFS space requires
+more aggressive backup merges and deletions). As a reminder, the TTL can be altered on the `hbase:backup` table using the `alter` command
+in the HBase shell. Modifying the configuration property `hbase.backup.system.ttl` in hbase-site.xml after the system table exists has no effect.
+
+## Capacity Planning
+
+When designing a distributed system deployment, it is critical that some basic mathematical rigor is executed to ensure sufficient computational
+capacity is available given the data and software requirements of the system. For this feature, the availability of network capacity is the largest
+bottleneck when estimating the performance of some implementation of backup and restore. The second most costly function is the speed at which
+data can be read/written.
+
+### Full Backups
+
+To estimate the duration of a full backup, we have to understand the general actions which are invoked:
+
+- Write-ahead log roll on each RegionServer: ones to tens of seconds per RegionServer in parallel. Relative to the load on each RegionServer.
+- Take an HBase snapshot of the table(s): tens of seconds. Relative to the number of regions and files that comprise the table.
+- Export the snapshot to the destination: see below. Relative to the size of the data and the network bandwidth to the destination.
+
+To approximate how long the final step will take, we have to make some assumptions on hardware. Be aware that these will _not_ be accurate for your
+system; these are numbers that you or your administrator know for your system. Let's say the speed of reading data from HDFS on a single node is
+capped at 80MB/s (across all Mappers that run on that host), a modern network interface controller (NIC) supports 10Gb/s, the top-of-rack switch can
+handle 40Gb/s, and the WAN between your clusters is 10Gb/s. This means that you can only ship data to your remote at a speed of 1.25GB/s — meaning
+that 16 nodes (`1.25 * 1024 / 80 = 16`) participating in the ExportSnapshot should be able to fully saturate the link between clusters. With more
+nodes in the cluster, we can still saturate the network but at a lesser impact on any one node which helps ensure local SLAs are made. If the size
+of the snapshot is 10TB, this full backup would take in the ballpark of 2.5 hours (`10 * 1024 / 1.25 / (60 * 60) = 2.23hrs`).
+
+As a general statement, it is very likely that the WAN bandwidth between your local cluster and the remote storage is the largest
+bottleneck to the speed of a full backup.
+
+When the concern is restricting the computational impact of backups to a "production system", the above formulas can be reused with the optional
+command-line arguments to `hbase backup create`: `-b`, `-w`, `-q`. The `-b` option defines the bandwidth at which each worker (Mapper) would
+write data. The `-w` argument limits the number of workers that would be spawned in the DistCp job. The `-q` allows the user to specify a YARN
+queue which can limit the specific nodes where the workers will be spawned — this can quarantine the backup workers performing the copy to
+a set of non-critical nodes. Relating the `-b` and `-w` options to our earlier equations: `-b` would be used to restrict each node from reading
+data at the full 80MB/s and `-w` is used to limit the job from spawning 16 worker tasks.
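+
+For example, to cap each worker at the 80MB/s read rate assumed above, limit the job to 16 workers, and keep them in a dedicated queue, a hedged sketch (the destination URI and queue name are illustrative):
+
+```bash
+$ hbase backup create full hdfs://backup-nn:8020/data/backup -s green_set -w 16 -b 80 -q backup-queue
+```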
+
+### Incremental Backup
+
+Like we did for full backups, we have to understand the incremental backup process to approximate its runtime and cost.
+
+- Identify new write-ahead logs since the last full or incremental backup: negligible. A priori knowledge from the backup system table(s).
+- Read, filter, and write "minimized" HFiles equivalent to the WALs: dominated by the speed of writing data. Relative to write speed of HDFS.
+- Read bulk load records from the backup system table, form the paths for bulk-loaded HFiles, and copy them to the backup destination.
+- DistCp the HFiles to the destination: [see above](/docs/backup-restore/additional-topics#full-backups).
+
+For the second step, the dominating cost of this operation would be the re-writing the data (under the assumption that a majority of the
+data in the WAL is preserved). In this case, we can assume an aggregate write speed of 30MB/s per node. Continuing our 16-node cluster example,
+this would require approximately 15 minutes to perform this step for 50GB of data (50 \* 1024 / 60 / 60 = 14.2). The amount of time to start the
+DistCp MapReduce job would likely dominate the actual time taken to copy the data (50 / 1.25 = 40 seconds) and can be ignored.
+
+## Limitations of the Backup and Restore Utility
+
+**Serial backup operations**
+Backup operations cannot be run concurrently. An operation includes actions like create, delete, restore, and merge. Only one active backup session is supported. [HBASE-16391](https://issues.apache.org/jira/browse/HBASE-16391)
+will introduce multiple-backup sessions support.
+
+**No means to cancel backups**
+Both backup and restore operations cannot be canceled. ([HBASE-15997](https://issues.apache.org/jira/browse/HBASE-15997), [HBASE-15998](https://issues.apache.org/jira/browse/HBASE-15998)).
+The workaround to cancel a backup would be to kill the client-side backup command (`control-C`), ensure all relevant MapReduce jobs have exited, and then
+run the `hbase backup repair` command to ensure the system backup metadata is consistent.
+
+**Backups can only be saved to a single location**
+Copying backup information to multiple locations is an exercise left to the user. [HBASE-15476](https://issues.apache.org/jira/browse/HBASE-15476) will
+introduce the ability to specify multiple-backup destinations intrinsically.
+
+**HBase superuser access is required**
+Only an HBase superuser (e.g. hbase) is allowed to perform backup/restore, which can pose a problem for shared HBase installations. Current mitigations would require
+coordination with system administrators to build and deploy a backup and restore strategy ([HBASE-14138](https://issues.apache.org/jira/browse/HBASE-14138)).
+
+**Backup restoration is an online operation**
+To perform a restore from a backup, the HBase cluster must be online; this is a caveat of the current implementation ([HBASE-16573](https://issues.apache.org/jira/browse/HBASE-16573)).
+
+**Some operations may fail and require re-run**
+The HBase backup feature is primarily client driven. While there is the standard HBase retry logic built into the HBase Connection, persistent errors in executing operations
+may propagate back to the client (e.g. snapshot failure due to region splits). The backup implementation should be moved from client-side into the ProcedureV2 framework
+in the future which would provide additional robustness around transient/retryable failures. The `hbase backup repair` command is meant to correct states which the system
+cannot automatically detect and recover from.
+
+**Avoidance of declaration of public API**
+While the Java API to interact with this feature exists and its implementation is separated from an interface, insufficient rigor has been applied to determine if
+it is exactly what we intend to ship to users. As such, it is marked for a `Private` audience with the expectation that, as users begin to try the feature, there
+will be modifications that would necessitate breaking compatibility ([HBASE-17517](https://issues.apache.org/jira/browse/HBASE-17517)).
+
+**Lack of global metrics for backup and restore**
+Individual backup and restore operations contain metrics about the amount of work the operation included, but there is no centralized location (e.g. the Master UI)
+which presents this information for consumption ([HBASE-16565](https://issues.apache.org/jira/browse/HBASE-16565)).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/administration.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/administration.mdx
new file mode 100644
index 000000000000..dfd3c666904b
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/administration.mdx
@@ -0,0 +1,172 @@
+---
+title: "Administration of Backup Images"
+description: "Managing HBase backup images including listing, describing, deleting, and merging backup sets for efficient storage."
+---
+
+The `hbase backup` command has several subcommands that help with administering backup images as they accumulate. Most production
+environments require recurring backups, so it is necessary to have utilities to help manage the data of the backup repository.
+Some subcommands enable you to find information that can help identify backups that are relevant in a search for particular data.
+You can also delete backup images.
+
+The following list details each `hbase backup` subcommand that can help administer backups. Run the full command-subcommand line as
+the HBase superuser.
+
+### Managing Backup Progress
+
+You can monitor a running backup in another terminal session by running the _hbase backup progress_ command and specifying the backup ID as an argument.
+
+For example, run the following command as the hbase superuser to view the progress of a backup:
+
+```bash
+$ hbase backup progress <backup_id>
+```
+
+#### Positional Command-Line Arguments [#backup-restore-administation-managing-backup-progress-positional-command-line-arguments]
+
+**_backup_id_**
+Specifies the backup that you want to monitor by seeing the progress information. The backupId is case-sensitive.
+
+#### Named Command-Line Arguments [#backup-restore-administation-manging-backup-progress-named-command-line-arguments]
+
+None.
+
+#### Example usage [#backup-restore-administation-manging-backup-progress-example-usage]
+
+```bash
+hbase backup progress backupId_1467823988425
+```
+
+### Managing Backup History
+
+This command displays a log of backup sessions. The information for each session includes backup ID, type (full or incremental), the tables
+in the backup, status, and start and end time. Specify the number of backup sessions to display with the optional -n argument.
+
+```bash
+$ hbase backup history
+```
+
+#### Positional Command-Line Arguments [#backup-restore-administation-managing-backup-history-positional-command-line-arguments]
+
+**_backup_id_**
+Specifies the backup that you want to monitor by seeing the progress information. The backupId is case-sensitive.
+
+#### Named Command-Line Arguments [#backup-restore-administation-managing-backup-history-named-command-line-arguments]
+
+**_-n \<num_records\>_**
+(Optional) The maximum number of backup records (Default: 10).
+
+**_-p \<backup_root_path\>_**
+The full filesystem URI of where backup images are stored.
+
+**_-s \<backup_set_name\>_**
+The name of the backup set to obtain history for. Mutually exclusive with the _-t_ option.
+
+**_-t \<table_name\>_**
+The name of table to obtain history for. Mutually exclusive with the _-s_ option.
+
+#### Example usage [#backup-restore-administation-managing-backup-history-example-usage]
+
+```bash
+$ hbase backup history
+$ hbase backup history -n 20
+$ hbase backup history -t WebIndexRecords
+```
+
+### Describing a Backup Image
+
+This command can be used to obtain information about a specific backup image.
+
+```bash
+$ hbase backup describe <backup_id>
+```
+
+#### Positional Command-Line Arguments [#backup-restore-administation-describing-a-backup-image-command-line-arguments]
+
+**_backup_id_**
+The ID of the backup image to describe.
+
+#### Named Command-Line Arguments [#backup-restore-administation-describing-a-backup-image-named-command-line-arguments]
+
+None.
+
+#### Example usage [#backup-restore-administation-describing-a-backup-image-example-usage]
+
+```bash
+$ hbase backup describe backupId_1467823988425
+```
+
+### Deleting Backup Images
+
+The `hbase backup delete` command deletes backup images that are no longer needed.
+
+#### Syntax
+
+```bash
+$ hbase backup delete -l <backup_id_list>
+$ hbase backup delete -k <days>
+```
+
+#### Named Command-Line Arguments [#backup-restore-administation-deleting-backup-images-named-command-line-arguments]
+
+**_-l \<backup_id_list\>_**
+Comma-separated list of backup IDs to delete.
+
+**_-k \<days\>_**
+Deletes all backup images completed more than the specified number of days ago.
+
+
+ These options are **mutually exclusive**. Only one of `-l` or `-k` may be used at a time.
+
+
+#### Example Usage [#backup-restore-administation-deleting-backup-images-example-usage]
+
+Delete specific backup images by ID:
+
+```bash
+$ hbase backup delete -l backupId_1467823988425,backupId_1467824989999
+```
+
+Delete all backup images older than 30 days:
+
+```bash
+$ hbase backup delete -k 30
+```
+
+
+- Deleting a backup may affect all following incremental backups (in the same backup root) up to
+ the next full backup. For example, if you take a full backup every 2 weeks and
+ daily incremental backups, running `hbase backup delete -k 7` when the full backup is older than
+ 7 days will effectively remove the data for all subsequent incremental backups.
+ The backup IDs may still be listed, but their data will be gone.
+
+- If the most recent backup is an incremental backup and you delete it,
+ you should run a **full backup** next.
+ Running another incremental backup immediately after may result in missing data in the
+ backup image. (See [HBASE-28084](https://issues.apache.org/jira/browse/HBASE-28084))
+
+
+
+### Backup Repair Command
+
+This command attempts to correct any inconsistencies in persisted backup metadata which exists as
+the result of software errors or unhandled failure scenarios. While the backup implementation tries
+to correct all errors on its own, this tool may be necessary in the cases where the system cannot
+automatically recover on its own.
+
+```bash
+$ hbase backup repair
+```
+
+#### Positional Command-Line Arguments [#backup-restore-administation-backup-repair-command-positional-command-line-arguments]
+
+None.
+
+#### Named Command-Line Arguments [#backup-restore-administation-backup-repair-command-named-command-line-arguments]
+
+None.
+
+#### Example usage [#backup-restore-administation-backup-repair-command-example-usage]
+
+```bash
+$ hbase backup repair
+```
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/commands.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/commands.mdx
new file mode 100644
index 000000000000..bdcc38b284fb
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/commands.mdx
@@ -0,0 +1,254 @@
+---
+title: "Backup and Restore commands"
+description: "Command-line utilities for creating, restoring, and merging HBase backups including full and incremental backup operations."
+---
+
+This covers the command-line utilities that administrators would run to create, restore, and merge backups. Tools to
+inspect details on specific backup sessions is covered in the next section, [Administration of Backup Images](/docs/backup-restore/administration).
+
+Run the command `hbase backup help <command>` to access the online help that provides basic information about a command
+and its options. The below information is captured in this help message for each command.
+
+### Creating a Backup Image
+
+
+ For HBase clusters also using Apache Phoenix: include the SQL system catalog tables in the backup.
+ In the event that you need to restore the HBase backup, access to the system catalog tables enables
+ you to resume Phoenix interoperability with the restored data.
+
+
+The first step in running the backup and restore utilities is to perform a full backup and to store the data in a separate image
+from the source. At a minimum, you must do this to get a baseline before you can rely on incremental backups.
+
+Run the following command as HBase superuser:
+
+```bash
+hbase backup create <type> <backup_path>
+```
+
+After the command finishes running, the console prints a SUCCESS or FAILURE status message. The SUCCESS message includes a _backup_ ID.
+The backup ID is the Unix time (also known as Epoch time) that the HBase master received the backup request from the client.
+
+
+ Record the backup ID that appears at the end of a successful backup. In case the source cluster
+ fails and you need to recover the dataset with a restore operation, having the backup ID readily
+ available can save time.
+
+
+#### Positional Command-Line Arguments [#backup-restore-commands-creating-a-backup-image-positional-command-line-arguments]
+
+**_type_**
+The type of backup to execute: _full_ or _incremental_. As a reminder, an _incremental_ backup requires a _full_ backup to
+already exist.
+
+**_backup_path_**
+The _backup_path_ argument specifies the full filesystem URI of where to store the backup image. Valid prefixes are
+_hdfs:_, _webhdfs:_, _s3a:_ or other compatible Hadoop File System implementations.
+
+#### Named Command-Line Arguments [#backup-restore-commands-creating-a-backup-image-named-command-line-arguments]
+
+**_-t \_**
+A comma-separated list of tables to back up. If no tables are specified, all tables are backed up. No regular-expression or
+wildcard support is present; all table names must be explicitly listed. See [Backup Sets](/docs/backup-restore/commands#using-backup-sets) for more
+information about performing operations on collections of tables. Mutually exclusive with the _-s_ option; one of these
+named options is required.
+
+**_-s \_**
+Identify tables to back up based on a backup set. See [Using Backup Sets](/docs/backup-restore/commands#using-backup-sets) for the purpose and usage
+of backup sets. Mutually exclusive with the _-t_ option.
+
+**_-w \_**
+(Optional) Specifies the number of parallel workers to copy data to the backup destination. Backups are currently executed by MapReduce jobs
+so this value corresponds to the number of Mappers that will be spawned by the job.
+
+**_-b \_**
+(Optional) Specifies the bandwidth of each worker in MB per second.
+
+**_-d_**
+(Optional) Enables "DEBUG" mode which prints additional logging about the backup creation.
+
+**_-i_**
+(Optional) Ignores checksum verification between the source snapshot and the exported snapshot. Use this option especially when the source
+and target file system types are different, so that checksum checks are skipped.
+
+**_-q \_**
+(Optional) Specifies the name of the YARN queue in which the MapReduce job that creates the backup should be executed. This option
+is useful to prevent backup tasks from stealing resources away from other MapReduce jobs of high importance.
+
+#### Example usage [#backup-restore-commands-creating-a-backup-image-example-usage]
+
+```bash
+$ hbase backup create full hdfs://host5:9000/data/backup -t SALES2,SALES3 -w 3
+```
+
+This command creates a full backup image of two tables, SALES2 and SALES3, in the HDFS instance whose NameNode is host5:9000,
+in the path _/data/backup_. The _-w_ option specifies that no more than three parallel workers complete the operation.
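+
+As a follow-up sketch (the bandwidth limit and the YARN queue name are assumptions, not part of the original example),
+an incremental backup of the same tables can be taken against the same destination, throttled per worker and isolated
+in a dedicated queue:
+
+```bash
+# Hypothetical follow-up: incremental backup of SALES2 and SALES3 to the same destination,
+# limiting each of the 3 workers to 100 MB/s and running in the "backup_queue" YARN queue.
+$ hbase backup create incremental hdfs://host5:9000/data/backup -t SALES2,SALES3 -w 3 -b 100 -q backup_queue
+```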
+
+### Restoring a Backup Image
+
+Run the following command as an HBase superuser. You can only restore a backup on a running HBase cluster because the data must be
+redistributed to the RegionServers for the operation to complete successfully.
+
+```bash
+hbase restore <backup_path> <backup_id>
+```
+
+#### Positional Command-Line Arguments [#backup-restore-commands-restoring-a-backup-image-positional-command-line-arguments]
+
+**_backup_path_**
+The _backup_path_ argument specifies the full filesystem URI of where the backup image is stored. Valid prefixes are
+_hdfs:_, _webhdfs:_, _s3a:_ or other compatible Hadoop File System implementations.
+
+**_backup_id_**
+The backup ID that uniquely identifies the backup image to be restored.
+
+#### Named Command-Line Arguments [#backup-restore-commands-restoring-a-backup-image-named-command-line-arguments]
+
+**_-t \_**
+A comma-separated list of tables to restore. See [Backup Sets](/docs/backup-restore/commands#using-backup-sets) for more
+information about performing operations on collections of tables. Mutually exclusive with the _-s_ option; one of these
+named options is required.
+
+**_-s \_**
+Identify tables to restore based on a backup set. See [Using Backup Sets](/docs/backup-restore/commands#using-backup-sets) for the purpose and usage
+of backup sets. Mutually exclusive with the _-t_ option.
+
+**_-q \_**
+(Optional) Specifies the name of the YARN queue in which the MapReduce job that performs the restore should be executed. This option
+is useful to prevent restore tasks from stealing resources away from other MapReduce jobs of high importance.
+
+**_-c_**
+(Optional) Perform a dry-run of the restore. The actions are checked, but not executed.
+
+**_-m \_**
+(Optional) A comma-separated list of tables to restore into. If this option is not provided, the original table name is used. When
+this option is provided, there must be an equal number of entries provided in the `-t` option.
+
+**_-o_**
+(Optional) Overwrites the target table for the restore if the table already exists.
+
+#### Example of Usage
+
+```bash
+hbase restore /tmp/backup_incremental backupId_1467823988425 -t mytable1,mytable2
+```
+
+This command restores two tables of an incremental backup image. In this example:
+
+- `/tmp/backup_incremental` is the path to the directory containing the backup image.
+- `backupId_1467823988425` is the backup ID.
+- `mytable1` and `mytable2` are the names of tables in the backup image to be restored.
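+
+As a further sketch (the target table names here are hypothetical), the `-m` option can map each restored table to a
+different target name, and `-o` allows overwriting targets that already exist:
+
+```bash
+# Hypothetical example: restore mytable1 and mytable2 into renamed target tables,
+# overwriting those targets if they already exist.
+hbase restore /tmp/backup_incremental backupId_1467823988425 \
+  -t mytable1,mytable2 -m mytable1_restored,mytable2_restored -o
+```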
+
+
+ If the namespace of a table being restored does not exist in the target environment, it will be
+ automatically created during the restore operation.
+ [HBASE-25707](https://issues.apache.org/jira/browse/HBASE-25707)
+
+
+### Merging Incremental Backup Images
+
+This command merges two or more incremental backup images into a single incremental backup image,
+which is useful for consolidating many small incremental backup images into a larger one. For
+example, it could be used to merge hourly incremental backups into a daily incremental backup image,
+or daily incremental backups into a weekly incremental backup.
+
+```bash
+$ hbase backup merge <backup_ids>
+```
+
+#### Positional Command-Line Arguments [#backup-restore-commands-merging-incremental-backup-images-positional-command-line-arguments]
+
+**_backup_ids_**
+A comma-separated list of incremental backup image IDs that are to be combined into a single image.
+
+#### Named Command-Line Arguments [#backup-restore-commands-merging-incremental-backup-images-named-command-line-arguments]
+
+None.
+
+#### Example usage [#backup-restore-commands-merging-incremental-backup-images-example-usage]
+
+```bash
+$ hbase backup merge backupId_1467823988425,backupId_1467827588425
+```
+
+### Using Backup Sets
+
+Backup sets can ease the administration of HBase data backups and restores by reducing the amount of repetitive input
+of table names. You can group tables into a named backup set with the `hbase backup set add` command. You can then use
+the `-set` option to invoke the name of a backup set in the `hbase backup create` or `hbase restore` command rather than
+listing every table in the group individually, as sketched below. You can have multiple backup sets.
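+
+A minimal sketch of this workflow, assuming a hypothetical set name (`green_set`) and hypothetical table names, and
+reusing the backup destination from the earlier example:
+
+```bash
+# Define the set once, listing its member tables (names are assumptions for illustration).
+hbase backup set add green_set transactions,customer
+
+# Later, back up every table in the set without listing the tables again.
+hbase backup create full hdfs://host5:9000/data/backup -s green_set
+```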
+
+
+ Note the differentiation between the `hbase backup set add` command and the _-set_ option. The
+ `hbase backup set add` command must be run before using the `-set` option in a different command
+ because backup sets must be named and defined before using backup sets as a shortcut.
+
+
+If you run the `hbase backup set add` command and specify a backup set name that does not yet exist on your system, a new set
+is created. If you run the command with the name of an existing backup set name, then the tables that you specify are added
+to the set.
+
+In this command, the backup set name is case-sensitive.
+
+
+ The metadata of backup sets is stored within HBase. If you do not have access to the original
+ HBase cluster with the backup set metadata, then you must specify individual table names to
+ restore the data.
+
+
+To create a backup set, run the following command as the HBase superuser:
+
+```bash
+$ hbase backup set <subcommand> <backup_set_name> <tables>
+```
+
+#### Backup Set Subcommands
+
+The following list details the subcommands of the `hbase backup set` command.
+
+
+ You must enter one (and no more than one) of the following subcommands after `hbase backup set` to
+ complete an operation. Also, the backup set name is case-sensitive in the command-line utility.
+
+
+**_add_**
+Adds table\[s\] to a backup set. Specify a _backup_set_name_ value after this argument to create a backup set.
+
+**_remove_**
+Removes tables from the set. Specify the tables to remove in the tables argument.
+
+**_list_**
+Lists all backup sets.
+
+**_describe_**
+Displays a description of a backup set. The information includes whether the set has full
+or incremental backups, start and end times of the backups, and a list of the tables in the set. This subcommand must be
+followed by a valid _backup_set_name_ value.
+
+**_delete_**
+Deletes a backup set. Enter the value for the _backup_set_name_ option directly after the `hbase backup set delete` command.
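+
+The following sketch illustrates these subcommands (the `Q1Data` set and `TEAM_4` table are the hypothetical names
+used elsewhere in this section):
+
+```bash
+hbase backup set list                  # list all defined backup sets
+hbase backup set describe Q1Data       # show the tables and backup history of one set
+hbase backup set remove Q1Data TEAM_4  # remove a table from the set
+hbase backup set delete Q1Data         # delete the set entirely
+```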
+
+#### Positional Command-Line Arguments [#backup-restore-commands-using-backup-sets-positional-command-line-arguments]
+
+**_backup_set_name_**
+Use to assign or invoke a backup set name. The backup set name must contain only printable characters and cannot have any spaces.
+
+**_tables_**
+List of tables (or a single table) to include in the backup set. Enter the table names as a comma-separated list. If no tables
+are specified, all tables are included in the set.
+
+ As a backup strategy, maintain a log or other record of the case-sensitive backup set names and the
+ corresponding tables in each set on a separate or remote cluster. This information can help you in
+ case of failure on the primary cluster.
+ case of failure on the primary cluster.
+
+
+#### Example of Usage
+
+```bash
+$ hbase backup set add Q1Data TEAM_3,TEAM_4
+```
+
+Depending on the environment, this command results in _one_ of the following actions:
+
+- If the `Q1Data` backup set does not exist, a backup set containing tables `TEAM_3` and `TEAM_4` is created.
+- If the `Q1Data` backup set exists already, the tables `TEAM_3` and `TEAM_4` are added to the `Q1Data` backup set.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/index.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/index.mdx
new file mode 100644
index 000000000000..7a69147ee05c
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/index.mdx
@@ -0,0 +1,4 @@
+---
+title: "Backup and Restore"
+description: "Comprehensive guide to HBase backup and restore capabilities for data protection, disaster recovery, and point-in-time recovery."
+---
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/meta.json b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/meta.json
new file mode 100644
index 000000000000..904a4754a003
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/meta.json
@@ -0,0 +1,9 @@
+{
+ "title": "Backup and Restore",
+ "pages": [
+ "overview",
+ "commands",
+ "administration",
+ "additional-topics"
+ ]
+}
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/overview.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/overview.mdx
new file mode 100644
index 000000000000..c6faca7bfe2b
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/backup-restore/overview.mdx
@@ -0,0 +1,157 @@
+---
+title: "Overview"
+description: "Introduction to HBase backup and restore feature, including full and incremental backups for disaster recovery and point-in-time recovery."
+---
+
+Backup and restore is a standard operation provided by many databases. An effective backup and restore
+strategy helps ensure that users can recover data in case of unexpected failures. The HBase backup and restore
+feature helps ensure that enterprises using HBase as a canonical data repository can recover from catastrophic
+failures. Another important feature is the ability to restore the database to a particular
+point-in-time, commonly referred to as a snapshot.
+
+The HBase backup and restore feature provides the ability to create full backups and incremental backups on
+tables in an HBase cluster. The full backup is the foundation on which incremental backups are applied
+to build iterative snapshots. Incremental backups can be run on a schedule to capture changes over time,
+for example by using a Cron task. Incremental backups are more cost-effective than full backups because they only capture
+the changes since the last backup and they also enable administrators to restore the database to any prior incremental backup. Furthermore, the
+utilities also enable table-level data backup-and-recovery if you do not want to restore the entire dataset
+of the backup.
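+
+A minimal sketch of that rhythm, assuming a hypothetical backup destination and table name (the exact commands are
+covered in the later commands section):
+
+```bash
+# Take a full baseline once, then capture changes with scheduled incremental backups
+# (the destination URI and table name are assumptions for illustration).
+hbase backup create full hdfs://backup-nn:8020/hbase-backups -t orders
+hbase backup create incremental hdfs://backup-nn:8020/hbase-backups -t orders
+```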
+
+The backup and restore feature supplements the HBase Replication feature. While HBase replication is ideal for
+creating "hot" copies of the data (where the replicated data is immediately available for query), the backup and
+restore feature is ideal for creating "cold" copies of data (where a manual step must be taken to restore the system).
+Previously, users only had the ability to create full backups via the ExportSnapshot functionality. The incremental
+backup implementation is the novel improvement over the previous "art" provided by ExportSnapshot.
+
+The backup and restore feature uses DistCp to transfer files between clusters.
+[HADOOP-15850](https://issues.apache.org/jira/browse/HADOOP-15850) fixes a bug where CopyCommitter#concatFileChunks
+unconditionally tried to concatenate the files being DistCp'ed to the target cluster (even though the files are
+independent). Without the fix from
+[HADOOP-15850](https://issues.apache.org/jira/browse/HADOOP-15850), the transfer would fail.
+So the backup and restore feature requires one of the following Hadoop versions:
+
+- 2.7.x
+- 2.8.x
+- 2.9.2+
+- 2.10.0+
+- 3.0.4+
+- 3.1.2+
+- 3.2.0+
+- 3.3.0+
+
+## Terminology
+
+The backup and restore feature introduces new terminology which can be used to understand how control flows through the
+system.
+
+- _Backup_: A logical unit of data and metadata which can restore a table to its state at a specific point in time.
+- _Full backup_: A type of backup which wholly encapsulates the contents of the table at a point in time.
+- _Incremental backup_: A type of backup which contains the changes in a table since a full backup.
+- _Backup set_: A user-defined name which references one or more tables over which a backup can be executed.
+- _Backup ID_: A unique name which identifies one backup from the rest, e.g. `backupId_1467823988425`.
+
+## Planning
+
+There are some common strategies which can be used to implement backup and restore in your environment. The following section
+shows how these strategies are implemented and identifies potential tradeoffs with each.
+
+
+ The backup and restore tools have not been tested on Transparent Data Encryption (TDE) enabled
+ HDFS clusters. This is related to the open issue
+ [HBASE-16178](https://issues.apache.org/jira/browse/HBASE-16178).
+
+
+### Backup within a cluster
+
+This strategy stores the backups on the same cluster as where the backup was taken. This approach is only appropriate for testing
+as it does not provide any additional safety on top of what the software itself already provides.
+
+
+
+### Backup using a dedicated cluster
+
+This strategy provides greater fault tolerance and provides a path towards disaster recovery. In this setting, you will
+store the backup on a separate HDFS cluster by supplying the backup destination cluster's HDFS URL to the backup utility.
+You should consider backing up to a different physical location, such as a different data center.
+
+Typically, a backup-dedicated HDFS cluster uses a more economical hardware profile to save money.
+
+
+
+### Backup to the Cloud or a storage vendor appliance
+
+Another approach to safeguarding HBase incremental backups is to store the data on provisioned, secure servers that belong
+to third-party vendors and that are located off-site. The vendor can be a public cloud provider or a storage vendor who uses
+a Hadoop-compatible file system, such as S3 or another HDFS-compatible destination.
+
+
+
+
+ The HBase backup utility does not support backup to multiple destinations. A workaround is to
+ manually create copies of the backup files from HDFS or S3.
+
+
+## First-time configuration steps
+
+This section contains the necessary configuration changes that must be made in order to use the backup and restore feature.
+As this feature makes significant use of YARN's MapReduce framework to parallelize these I/O heavy operations, configuration
+changes extend outside of just `hbase-site.xml`.
+
+### Allow the "hbase" system user in YARN
+
+The YARN _container-executor.cfg_ configuration file must have the following property setting: _allowed.system.users=hbase_. No spaces
+are allowed in entries of this configuration file.
+
+
+ Skipping this step will result in runtime errors when executing the first backup tasks.
+
+
+**Example of a valid container-executor.cfg file for backup and restore:**
+
+```text
+yarn.nodemanager.log-dirs=/var/log/hadoop/mapred
+yarn.nodemanager.linux-container-executor.group=yarn
+banned.users=hdfs,yarn,mapred,bin
+allowed.system.users=hbase
+min.user.id=500
+```
+
+### HBase specific changes
+
+Add the following properties to hbase-site.xml and restart HBase if it is already running.
+
+
+ The ",..." is an ellipsis meant to imply that this is a comma-separated list of values, not
+ literal text which should be added to hbase-site.xml.
+
+
+```xml
+<property>
+  <name>hbase.backup.enable</name>
+  <value>true</value>
+</property>
+<property>
+  <name>hbase.master.logcleaner.plugins</name>
+  <value>org.apache.hadoop.hbase.backup.master.BackupLogCleaner,...</value>
+</property>
+<property>
+  <name>hbase.procedure.master.classes</name>
+  <value>org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager,...</value>
+</property>
+<property>
+  <name>hbase.procedure.regionserver.classes</name>
+  <value>org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager,...</value>
+</property>
+<property>
+  <name>hbase.coprocessor.region.classes</name>
+  <value>org.apache.hadoop.hbase.backup.BackupObserver,...</value>
+</property>
+<property>
+  <name>hbase.coprocessor.master.classes</name>
+  <value>org.apache.hadoop.hbase.backup.BackupMasterObserver,...</value>
+</property>
+<property>
+  <name>hbase.master.hfilecleaner.plugins</name>
+  <value>org.apache.hadoop.hbase.backup.BackupHFileCleaner,...</value>
+</property>
+```
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/announcing.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/announcing.mdx
new file mode 100644
index 000000000000..d138ff206e23
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/announcing.mdx
@@ -0,0 +1,44 @@
+---
+title: "Announcing Releases"
+description: "Email template and guidelines for announcing new HBase releases to the community and mailing lists."
+---
+
+Once an RC has passed successfully and the needed artifacts have been staged for distribution, you'll need to let everyone know about our shiny new release. It's not a requirement, but to make things easier for release managers we have a template you can start with. Be sure you replace \_version\_ and other markers with the relevant version numbers. You should manually verify all links before sending.
+
+```text
+The HBase team is happy to announce the immediate availability of HBase _version_.
+
+Apache HBase™ is an open-source, distributed, versioned, non-relational database.
+Apache HBase gives you low latency random access to billions of rows with
+millions of columns atop non-specialized hardware. To learn more about HBase,
+see https://hbase.apache.org/.
+
+HBase _version_ is the _nth_ minor release in the HBase _major_.x line, which aims to
+improve the stability and reliability of HBase. This release includes roughly
+XXX resolved issues not covered by previous _major_.x releases.
+
+Notable new features include:
+- List text descriptions of features that fit on one line
+- Including if JDK or Hadoop support versions changes
+- If the "stable" pointer changes, call that out
+- For those with obvious JIRA IDs, include them (HBASE-YYYYY)
+
+The full list of issues can be found in the included CHANGES.md and RELEASENOTES.md,
+or via our issue tracker:
+
+ https://s.apache.org/hbase-_version_-jira
+
+To download please follow the links and instructions on our website:
+
+ https://hbase.apache.org/downloads.html
+
+
+Question, comments, and problems are always welcome at: dev@hbase.apache.org.
+
+Thanks to all who contributed and made this release possible.
+
+Cheers,
+The HBase Dev Team
+```
+
+You should send this message to the following lists: dev@hbase.apache.org, user@hbase.apache.org, announce@apache.org. If you'd like a spot check before sending, feel free to ask via jira or the dev list.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/building.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/building.mdx
new file mode 100644
index 000000000000..9e4f7c2fb26e
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/building.mdx
@@ -0,0 +1,297 @@
+---
+title: "Building Apache HBase"
+description: "Maven build commands, JDK requirements, Hadoop version selection, and building HBase from source including protobuf and thrift generation."
+---
+
+## Basic Compile
+
+HBase is compiled using Maven. You must use at least Maven 3.0.4. To check your Maven version, run
+the command `mvn -version`.
+
+### JDK Version Requirements [#building-basic-compile-jdk-version-requirements]
+
+HBase has Java version compiler requirements that vary by release branch. At compilation time,
+HBase has the same version requirements as it does for runtime. See [Java](/docs/configuration/basic-prerequisites#configuration-basic-prerequisites-java) for a complete
+support matrix of Java version by HBase version.
+
+### Maven Build Commands
+
+All commands are executed from the local HBase project directory.
+
+#### Package
+
+The simplest command to compile HBase from its java source code is to use the `package` target, which builds JARs with the compiled files.
+
+```bash
+mvn package -DskipTests
+```
+
+Or, to clean up before compiling:
+
+```bash
+mvn clean package -DskipTests
+```
+
+With Eclipse set up as explained above in [Eclipse](/docs/building-and-developing#eclipse), you can also use the **Build** command in Eclipse.
+To create the full installable HBase package takes a little bit more work, so read on.
+
+#### Compile
+
+The `compile` target does not create the JARs with the compiled files.
+
+```bash
+mvn compile
+```
+
+```bash
+mvn clean compile
+```
+
+#### Install
+
+To install the JARs in your _~/.m2/_ directory, use the `install` target.
+
+```bash
+mvn install
+```
+
+```bash
+mvn clean install
+```
+
+```bash
+mvn clean install -DskipTests
+```
+
+#### Building HBase 2.x on Apple Silicon
+
+Building a non-master branch requires the protoc 2.5.0 binary, which is not available for Apple Silicon.
+HBASE-27741 added a workaround to the build that falls back to the osx-x86_64 version of protoc automatically via the `apple-silicon-workaround` Maven profile.
+The intention is that this change will permit the build to proceed with the x86 version of `protoc`, making use of the Rosetta instruction translation service built into the OS.
+If you'd like to provide and make use of your own aarch_64 `protoc`, you can disable this profile on the command line by adding `-P'!apple-silicon-workaround'`, or through configuration in your `settings.xml`.
+
+You can use the following commands to build protoc on your Apple Silicon machine.
+
+```bash
+curl -sSL https://github.com/protocolbuffers/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz | tar zx -
+cd protobuf-2.5.0
+curl -L -O https://gist.githubusercontent.com/liusheng/64aee1b27de037f8b9ccf1873b82c413/raw/118c2fce733a9a62a03281753572a45b6efb8639/protobuf-2.5.0-arm64.patch
+patch -p1 < protobuf-2.5.0-arm64.patch
+./configure --disable-shared
+make
+mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=2.5.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=src/protoc
+```
+
+### Running all or individual Unit Tests
+
+See the [Running tests](/docs/building-and-developing/tests#running-tests) section in [Unit Tests](/docs/building-and-developing/tests#building-and-developing-unit-tests)
+
+### Building against various Hadoop versions
+
+HBase supports building against Apache Hadoop versions: 2.y and 3.y (early release artifacts).
+Exactly which version of Hadoop is used by default varies by release branch. See the section
+[Hadoop](/docs/configuration/basic-prerequisites#hadoop) for the complete breakdown of supported Hadoop version by HBase release.
+
+The mechanism for selecting a Hadoop version at build time is identical across all releases. Which
+version of Hadoop is default varies. We manage Hadoop major version selection by way of Maven
+profiles. Due to the peculiarities of Maven profile mutual exclusion, the profile that builds
+against a particular Hadoop version is activated by setting a property, _not_ the usual profile
+activation. Hadoop version profile activation is summarized by the following table.
+
+#### Hadoop Profile Activation by HBase Release
+
+| | Hadoop2 Activation | Hadoop3 Activation |
+| ---------- | ------------------- | ---------------------- |
+| HBase 1.3+ | _active by default_ | `-Dhadoop.profile=3.0` |
+| HBase 3.0+ | _not supported_ | _active by default_ |
+
+
+ Please note that where a profile is active by default, `hadoop.profile` must NOT be provided.
+
+
+Once the Hadoop major version profile is activated, the exact Hadoop version can be
+specified by overriding the appropriate property value. For Hadoop2 versions, the property name
+is `hadoop-two.version`. With Hadoop3 versions, the property name is `hadoop-three.version`.
+
+#### Example 1: Building HBase 1.7 against Hadoop 2.10.0
+
+For example, to build HBase 1.7 against Hadoop 2.10.0, the profile is set for Hadoop2 by default,
+so only `hadoop-two.version` must be specified:
+
+```bash
+git checkout branch-1
+mvn -Dhadoop-two.version=2.10.0 ...
+```
+
+#### Example 2: Building HBase 2.3 or 2.4 against Hadoop 3.4.0-SNAPSHOT
+
+This is how a developer might check the compatibility of HBase 2.3 or 2.4 against an unreleased
+Hadoop version (currently 3.4). Both the Hadoop3 profile and version must be specified:
+
+```bash
+git checkout branch-2.4
+mvn -Dhadoop.profile=3.0 -Dhadoop-three.version=3.4.0-SNAPSHOT ...
+```
+
+#### Example 3: Building HBase 3.0 against Hadoop 3.4.0-SNAPSHOT
+
+The same developer might want also to check the development version of HBase (currently 3.0)
+against the development version of Hadoop (currently 3.4). In this case, the Hadoop3 profile is
+active by default, so only `hadoop-three.version` must be specified:
+
+```bash
+git checkout master
+mvn -Dhadoop-three.version=3.4.0-SNAPSHOT ...
+```
+
+### Building with JDK11 and Hadoop3
+
+HBase manages JDK-specific build settings using Maven profiles. The profile appropriate to the JDK
+in use is automatically activated. Building and running on JDK8 supports both Hadoop2 and Hadoop3.
+For JDK11, only Hadoop3 is supported. Thus, the Hadoop3 profile must be active when building on
+JDK11, and the artifacts used when running HBase on JDK11 must be compiled against Hadoop3.
+Furthermore, the JDK11 profile requires a minimum Hadoop version of 3.2.0. This value is specified
+by the JDK11 profile, but it can be overridden using the `hadoop-three.version` property as normal.
+For details on Hadoop profile activation by HBase branch, see
+[Building against various Hadoop versions](/docs/building-and-developing/building#building-against-various-hadoop-versions). See [Java](/docs/configuration/basic-prerequisites#configuration-basic-prerequisites-java) for a complete
+support matrix of Java version by HBase version.
+
+#### Example 1: Building HBase 2.3 or 2.4 with JDK11
+
+To build HBase 2.3 or 2.4 with JDK11, the Hadoop3 profile must be activated explicitly.
+
+```bash
+git checkout branch-2.4
+JAVA_HOME=/usr/lib/jvm/java-11 mvn -Dhadoop.profile=3.0 ...
+```
+
+#### Example 2: Building HBase 3.0 with JDK11
+
+For HBase 3.0, the Hadoop3 profile is active by default, so no additional properties need to be
+specified.
+
+```bash
+git checkout master
+JAVA_HOME=/usr/lib/jvm/java-11 mvn ...
+```
+
+### Building and testing in an IDE with JDK11 and Hadoop3
+
+Continuing the discussion from the [earlier section](/docs/building-and-developing/building#building-with-jdk11-and-hadoop3), building and
+testing with JDK11 and Hadoop3 within an IDE may require additional configuration. Specifically,
+make sure the JVM version used by the IDE is a JDK11, the active JDK Maven profile is for JDK11,
+and the Maven profile for JDK8 is NOT active. Likewise, ensure the Hadoop3 Maven profile is active
+and the Hadoop2 Maven profile is NOT active.
+
+### Build Protobuf
+
+You may need to change the protobuf definitions that reside in the _hbase-protocol_ module or other modules.
+
+Prior to hbase-2.0.0, protobuf definition files were sprinkled across all hbase modules, but now everything
+to do with protobuf must reside in the hbase-protocol module; we are trying to contain our protobuf
+use so we can freely change versions without upsetting any downstream project use of protobuf.
+
+The protobuf files are located in _hbase-protocol/src/main/protobuf_.
+For the change to be effective, you will need to regenerate the classes.
+
+```bash
+mvn package -pl hbase-protocol -am
+```
+
+Similarly, protobuf definitions for internal use are located in the _hbase-protocol-shaded_ module.
+
+```bash
+mvn package -pl hbase-protocol-shaded -am
+```
+
+Typically, protobuf code generation is done using the native `protoc` binary. In our build we use a maven plugin for
+convenience; however, the plugin may not be able to retrieve appropriate binaries for all platforms. If you find yourself
+on a platform where protoc fails, you will have to compile protoc from source, and run it independently of our maven build.
+You can disable the inline code generation by specifying `-Dprotoc.skip` in your maven arguments, allowing your build to proceed further.
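+
+For instance, a build that skips inline protobuf generation might look like the following sketch (combine the flag
+with whichever module and goals you are actually building):
+
+```bash
+# Skip the protoc plugin when the bundled binaries cannot run on your platform;
+# previously generated protobuf classes (e.g. from a manually built protoc) are used instead.
+mvn package -pl hbase-protocol-shaded -am -Dprotoc.skip
+```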
+
+
+ If you need to manually generate your protobuf files, you should not use `clean` in subsequent
+ maven calls, as that will delete the newly generated files.
+
+
+Read the _hbase-protocol/README.txt_ for more details.
+
+### Build Thrift
+
+You may need to change the thrift definitions that reside in the _hbase-thrift_ module or other modules.
+
+The thrift files are located in _hbase-thrift/src/main/resources_.
+For the change to be effective, you will need to regenerate the classes.
+You can use maven profile `compile-thrift` to do this.
+
+```bash
+mvn compile -Pcompile-thrift
+```
+
+You may also want to define `thrift.path` for the thrift binary, using the following command:
+
+```bash
+mvn compile -Pcompile-thrift -Dthrift.path=/opt/local/bin/thrift
+```
+
+### Build a Tarball
+
+You can build a tarball without going through the release process described in [Releasing Apache HBase](/docs/building-and-developing/releasing), by running the following command:
+
+```bash
+mvn -DskipTests clean install && mvn -DskipTests package assembly:single
+```
+
+The distribution tarball is built in `hbase-assembly/target/hbase-<version>-bin.tar.gz`.
+
+You can install or deploy the tarball by having the assembly:single goal before install or deploy in the maven command:
+
+```bash
+mvn -DskipTests package assembly:single install
+```
+
+```bash
+mvn -DskipTests package assembly:single deploy
+```
+
+### Build Gotchas
+
+#### Maven Site failure
+
+If you see `Unable to find resource 'VM_global_library.vm'`, ignore it.
+It's not an error.
+It is [officially ugly](https://issues.apache.org/jira/browse/MSITE-286) though.
+
+## Build On Linux Aarch64
+
+HBase runs on both Windows and UNIX-like systems, and it should run on any platform
+that runs a supported version of Java. This should include JVMs on x86_64 and aarch64.
+The documentation below describes how to build hbase on the aarch64 platform.
+
+### Set Environment Variables
+
+Manually install Java and Maven on aarch64 servers if they are not installed,
+and set environment variables. For example:
+
+```bash
+export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-arm64
+export MAVEN_HOME=/opt/maven
+export PATH=${MAVEN_HOME}/bin:${JAVA_HOME}/bin:${PATH}
+```
+
+### Use Protobuf Supported On Aarch64
+
+HBase currently uses two versions of protobuf: version '3.11.4' internally and
+version '2.5.0' for external usage.
+The protoc-2.5.0 package does not work on the aarch64 platform, so add the maven
+profile '-Paarch64' when building. It downloads a protoc-2.5.0 package built for
+aarch64 from the maven repository.
+
+```bash
+mvn clean install -Paarch64 -DskipTests
+```
+
+
+ Protobuf has shipped an aarch64 protoc since version '3.5.0'. We are planning to upgrade
+ protobuf later, after which the '-Paarch64' profile will no longer be needed.
+
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/developer-guidelines.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/developer-guidelines.mdx
new file mode 100644
index 000000000000..318742411874
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/developer-guidelines.mdx
@@ -0,0 +1,809 @@
+---
+title: "Developer Guidelines"
+description: "Code standards, interface classifications, formatting conventions, Git best practices, and patch submission guidelines for HBase contributors."
+---
+
+## Branches
+
+We use Git for source code management and latest development happens on `master` branch. There are
+branches for past major/minor/maintenance releases and important features and bug fixes are often
+back-ported to them.
+
+## Policy for Fix Version in JIRA
+
+To determine if a given fix is in a given release purely from the release numbers, the following
+rules are defined:
+
+- Fix version of X.Y.Z => fixed in all releases X.Y.Z' (where Z' >= Z).
+- Fix version of X.Y.0 => fixed in all releases X.Y'.\* (where Y' >= Y).
+- Fix version of X.0.0 => fixed in all releases X'.\*.\* (where X' >= X).
+
+By this policy, fix version of 1.3.0 implies 1.4.0, but 1.3.2 does not imply 1.4.0 as we could not
+tell purely from the numbers which release came first.
+
+## Code Standards
+
+### Interface Classifications
+
+Interfaces are classified both by audience and by stability level.
+These labels appear at the head of a class.
+The conventions followed by HBase are inherited from its parent project, Hadoop.
+
+The following interface classifications are commonly used:
+
+#### InterfaceAudience
+
+`@InterfaceAudience.Public`
+APIs for users and HBase applications.
+These APIs will be deprecated through major versions of HBase.
+
+`@InterfaceAudience.Private`
+APIs for HBase internals developers.
+No guarantees on compatibility or availability in future versions.
+Private interfaces do not need an `@InterfaceStability` classification.
+
+`@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)`
+APIs for HBase coprocessor writers.
+
+**No `@InterfaceAudience` Classification**:
+Packages without an `@InterfaceAudience` label are considered private.
+Mark your new packages if publicly accessible.
+
+
+ Only interfaces classified `@InterfaceAudience.Public` should be included in API documentation
+ (Javadoc). Committers must add new package excludes to the `ExcludePackageNames` section of the _pom.xml_
+ for new packages which do not contain public classes.
+
+
+#### @InterfaceStability
+
+`@InterfaceStability` is important for packages marked `@InterfaceAudience.Public`.
+
+`@InterfaceStability.Stable`
+Public packages marked as stable cannot be changed without a deprecation path or a very good reason.
+
+`@InterfaceStability.Unstable`
+Public packages marked as unstable can be changed without a deprecation path.
+
+`@InterfaceStability.Evolving`
+Public packages marked as evolving may be changed, but it is discouraged.
+
+**No `@InterfaceStability` Label**:
+Public classes with no `@InterfaceStability` label are discouraged, and should be considered implicitly unstable.
+
+If you are unclear about how to mark packages, ask on the development list.
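+
+As a rough sketch of how these labels look at the head of a class (the class itself is hypothetical, and the exact
+annotation package can vary by branch; recent branches use the Apache Yetus audience annotations):
+
+```java
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+// A public, still-evolving API surface: users may rely on it, but it may change between minor releases.
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ExampleClientFacade {
+  // ... public methods intended for HBase applications ...
+}
+```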
+
+### Code Formatting Conventions
+
+Please adhere to the following guidelines so that your patches can be reviewed more quickly.
+These guidelines have been developed based upon common feedback on patches from new contributors.
+
+See the [Code Conventions for the Java Programming Language](http://www.oracle.com/technetwork/java/index-135089.html) for more information on coding conventions in Java.
+See [Eclipse Code Formatting](/docs/building-and-developing#code-formatting) to set up Eclipse to check for some of
+these guidelines automatically.
+
+#### Space Invaders
+
+Do not use extra spaces around brackets.
+Use the second style, rather than the first.
+
+```java
+if ( foo.equals( bar ) ) { // don't do this
+```
+
+```java
+if (foo.equals(bar)) {
+```
+
+```java
+foo = barArray[ i ]; // don't do this
+```
+
+```java
+foo = barArray[i];
+```
+
+#### Auto Generated Code
+
+Auto-generated code in Eclipse often uses bad variable names such as `arg0`.
+Use more informative variable names.
+Use code like the second example here.
+
+```java
+ public void readFields(DataInput arg0) throws IOException { // don't do this
+ foo = arg0.readUTF(); // don't do this
+```
+
+```java
+ public void readFields(DataInput di) throws IOException {
+ foo = di.readUTF();
+```
+
+#### Long Lines
+
+Keep lines less than 100 characters.
+You can configure your IDE to do this automatically.
+
+```java
+Bar bar = foo.veryLongMethodWithManyArguments(argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9); // don't do this
+```
+
+```java
+Bar bar = foo.veryLongMethodWithManyArguments(
+  argument1, argument2, argument3, argument4, argument5, argument6, argument7, argument8, argument9);
+```
+
+#### Trailing Spaces
+
+Be sure there is a line break after the end of your code, and avoid lines with nothing but whitespace.
+This makes diffs more meaningful.
+You can configure your IDE to help with this.
+
+```java
+Bar bar = foo.getBar(); <--- imagine there is an extra space(s) after the semicolon.
+```
+
+#### API Documentation (Javadoc)
+
+Don't forget Javadoc!
+
+Javadoc warnings are checked during precommit.
+If the precommit tool gives you a '-1', please fix the javadoc issue.
+Your patch won't be committed if it adds such warnings.
+
+Also, no `@author` tags - that's a rule.
+
+#### Findbugs
+
+`Findbugs` is used to detect common bug patterns.
+It is checked during the precommit build.
+If errors are found, please fix them.
+You can run findbugs locally with `mvn findbugs:findbugs`, which will generate the `findbugs` files locally.
+Sometimes, you may have to write code smarter than `findbugs`.
+You can annotate your code to tell `findbugs` you know what you're doing, by annotating your class with the following annotation:
+
+```java
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+value="HE_EQUALS_USE_HASHCODE",
+justification="I know what I'm doing")
+```
+
+It is important to use the Apache-licensed version of the annotations. That generally means using
+annotations in the `edu.umd.cs.findbugs.annotations` package so that we can rely on the cleanroom
+reimplementation rather than annotations in the `javax.annotations` package.
+
+#### Javadoc - Useless Defaults
+
+Don't just leave javadoc tags the way IDE generates them, or fill redundant information in them.
+
+```java
+ /**
+ * @param table <---- don't leave them empty!
+ * @param region An HRegion object. <---- don't fill redundant information!
+ * @return Foo Object foo just created. <---- Not useful information
+ * @throws SomeException <---- Not useful. Function declarations already tell that!
+ * @throws BarException when something went wrong <---- really?
+ */
+ public Foo createFoo(Bar bar);
+```
+
+Either add something descriptive to the tags, or just remove them.
+The preference is to add something descriptive and useful.
+
+#### One Thing At A Time, Folks
+
+If you submit a patch for one thing, don't do auto-reformatting or unrelated reformatting of code on a completely different area of code.
+
+Likewise, don't add unrelated cleanup or refactorings outside the scope of your Jira.
+
+#### Ambiguous Unit Tests
+
+Make sure that you're clear about what you are testing in your unit tests and why.
+
+### Garbage-Collection Conserving Guidelines
+
+The following guidelines were borrowed from http://engineering.linkedin.com/performance/linkedin-feed-faster-less-jvm-garbage.
+Keep them in mind to keep preventable garbage collection to a minimum. Have a look
+at the blog post for some great examples of how to refactor your code according to
+these guidelines.
+
+- Be careful with Iterators
+- Estimate the size of a collection when initializing
+- Defer expression evaluation
+- Compile the regex patterns in advance
+- Cache it if you can
+- String Interns are useful but dangerous
+
+## Invariants
+
+We don't have many but what we have we list below.
+All are subject to challenge of course but until then, please hold to the rules of the road.
+
+### No permanent state in ZooKeeper
+
+ZooKeeper state should be transient (treat it like memory). If ZooKeeper state is deleted, hbase should be able to recover and essentially be in the same state.
+
+- Exceptions: There are currently a few exceptions that we need to fix around whether a table is enabled or disabled.
+- Replication data is currently stored only in ZooKeeper.
+ Deleting ZooKeeper data related to replication may cause replication to be disabled.
+ Do not delete the replication tree, _/hbase/replication/_.
+
+
+ Replication may be disrupted and data loss may occur if you delete the replication tree
+ (_/hbase/replication/_) from ZooKeeper. Follow progress on this issue at
+ [HBASE-10295](https://issues.apache.org/jira/browse/HBASE-10295).
+
+
+## Running In-Situ
+
+If you are developing Apache HBase, frequently it is useful to test your changes against a more-real cluster than what you find in unit tests.
+In this case, HBase can be run directly from the source in local-mode.
+All you need to do is run:
+
+```bash
+${HBASE_HOME}/bin/start-hbase.sh
+```
+
+This will spin up a full local-cluster, just as if you had packaged up HBase and installed it on your machine.
+
+Keep in mind that you will need to have installed HBase into your local maven repository for the in-situ cluster to work properly.
+That is, you will need to run:
+
+```bash
+mvn clean install -DskipTests
+```
+
+to ensure that maven can find the correct classpath and dependencies.
+Generally, the above command is just a good thing to try running first, if maven is acting oddly.
+
+## Adding Metrics
+
+After adding a new feature a developer might want to add metrics.
+HBase exposes metrics using the Hadoop Metrics 2 system, so adding a new metric involves exposing that metric to the hadoop system.
+Unfortunately the API of metrics2 changed from hadoop 1 to hadoop 2.
+In order to get around this a set of interfaces and implementations have to be loaded at runtime.
+To get an in-depth look at the reasoning and structure of these classes you can read the blog post located [here](https://blogs.apache.org/hbase/entry/migration_to_the_new_metrics).
+To add a metric to an existing MBean follow the short guide below:
+
+### Add Metric name and Function to Hadoop Compat Interface.
+
+Inside the source interface that corresponds to where the metrics are generated (e.g. MetricsMasterSource for things coming from HMaster), create new static strings for the metric name and description.
+Then add a new method that will be called to record a new reading.
+
+### Add the Implementation to Both Hadoop 1 and Hadoop 2 Compat modules.
+
+Inside the implementation of the source (e.g. MetricsMasterSourceImpl in the above example), create a new histogram, counter, gauge, or stat in the init method.
+Then, in the method that was added to the interface, wire up the parameter passed in to the histogram.
+
+Now add tests that make sure the data is correctly exported to the metrics 2 system.
+For this the MetricsAssertHelper is provided.
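+
+A hedged sketch of the interface-side half of this pattern (the source name, metric name, and method below are
+hypothetical illustrations, not existing HBase classes):
+
+```java
+// Hadoop-compat source interface: declare the metric's name/description constants
+// and a method the rest of the code calls to record a reading.
+public interface MetricsExampleSource {
+  String NEW_OP_TIME_NAME = "newOpTime";
+  String NEW_OP_TIME_DESC = "Time taken by the new operation, in milliseconds";
+
+  /** Record one observation of the new operation's latency. */
+  void updateNewOpTime(long timeMs);
+}
+// The matching *SourceImpl would create a histogram for NEW_OP_TIME_NAME in its init
+// method and have updateNewOpTime() add each reading to that histogram.
+```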
+
+## Git Best Practices
+
+**Avoid git merges.**
+Use `git pull --rebase` or `git fetch` followed by `git rebase`.
+
+**Do not use `git push --force`.**
+If the push does not work, fix the problem or ask for help.
+
+Please contribute to this document if you think of other Git best practices.
+
+### `rebase_all_git_branches.sh`
+
+The _dev-support/rebase_all_git_branches.sh_ script is provided to help keep your Git repository clean.
+Use the `-h` parameter to get usage instructions.
+The script automatically refreshes your tracking branches, attempts an automatic rebase of each local branch against its remote branch, and gives you the option to delete any branch which represents a closed `HBASE-` JIRA.
+The script has one optional configuration option, the location of your Git directory.
+You can set a default by editing the script.
+Otherwise, you can pass the git directory manually by using the `-d` parameter, followed by an absolute or relative directory name, or even '.' for the current working directory.
+The script checks the directory for a sub-directory called _.git/_ before proceeding.
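+
+For example (the checkout path here is hypothetical), pointing the script at a specific local repository looks like:
+
+```bash
+# Refresh tracking branches and rebase each local branch in the given checkout.
+$ ./dev-support/rebase_all_git_branches.sh -d ~/src/hbase
+```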
+
+## Submitting Patches
+
+If you are new to submitting patches to open source or new to submitting patches to Apache, start by
+reading the [On Contributing Patches](https://commons.apache.org/patches.html) page from
+[Apache Commons Project](https://commons.apache.org/).
+It provides a nice overview that applies equally to the Apache HBase Project.
+
+Make sure you review [Code Formatting Conventions](/docs/building-and-developing/developer-guidelines#code-formatting-conventions) for code style. If your patch
+was generated incorrectly or your code does not adhere to the code formatting guidelines, you may
+be asked to redo some work.
+
+HBase enforces code style via a maven plugin. After you've written up your changes, apply the
+formatter before committing.
+
+```bash
+$ mvn spotless:apply
+```
+
+When your commit is ready, present it to the community as a
+[GitHub Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests).
+
+### Few general guidelines
+
+- Always patch against the master branch first, even if you want to patch in another branch.
+ HBase committers always apply patches first to the master branch, and backport as necessary. For
+ complex patches, you may be asked to perform the backport(s) yourself.
+- Submit one single PR for a single fix. If necessary, squash local commits into a single one first,
+  as sketched after this list. See this
+  [Stack Overflow question](http://stackoverflow.com/questions/5308816/how-to-use-git-merge-squash)
+  for more information about squashing commits.
+- Please understand that not every patch may get committed, and that feedback will likely be
+ provided on the patch.
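+
+A minimal sketch of squashing (the commit count is an assumption; adjust `HEAD~N` to however many local commits you have):
+
+```bash
+# Interactively squash the last three local commits into one before opening the PR.
+$ git rebase -i HEAD~3
+# In the editor, keep "pick" on the first commit and change the rest to "squash" (or "fixup").
+```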
+
+### Unit Tests [#developer-guidelines-submitting-patches-unit-tests]
+
+Always add and/or update relevant unit tests when making the changes.
+Make sure that new/changed unit tests pass locally before submitting the patch, because it is faster
+than waiting for the presubmit result, which runs the full test suite. This will save your own time and
+effort.
+Use [Mockito](https://site.mockito.org/) to make mocks which are very useful for testing failure scenarios by
+injecting appropriate failures.
+
+If you are creating a new unit test class, notice how other unit test classes have
+classification/sizing annotations before the class name and static methods for setup/teardown of the
+testing environment. Be sure to include annotations in any new unit test files.
+See [Tests](/docs/building-and-developing/tests) for more information on tests.
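+
+A rough sketch of that shape (the test class itself is hypothetical; check an existing test in the module you are
+touching for the exact imports and sizing category used on your branch):
+
+```java
+// Imports shown as an assumption of typical JUnit 4 + HBase test-classification usage;
+// verify the exact packages against an existing test on your branch.
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.experimental.categories.Category;
+
+@Category({ SmallTests.class })
+public class TestExampleFeature {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestExampleFeature.class);
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // set up shared fixtures for all tests in this class
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    // tear down shared fixtures
+  }
+}
+```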
+
+### Integration Tests [#developer-guidelines-submitting-patches-integration-tests]
+
+Significant new features should provide an integration test in addition to unit tests, suitable for exercising the new feature at different points in its configuration space.
+
+### ReviewBoard
+
+Patches larger than one screen, or patches that will be tricky to review, should go through [ReviewBoard](https://reviews.apache.org).
+
+**Procedure: Use ReviewBoard**
+
+
+
+
+
+Register for an account if you don't already have one.
+It does not use the credentials from [issues.apache.org](https://issues.apache.org).
+Log in.
+
+
+
+
+
+Click **New Review Request**.
+
+
+
+
+
+Choose the `hbase-git` repository.
+Click Choose File to select the diff and optionally a parent diff.
+Click **Create Review Request**.
+
+
+
+
+
+Fill in the fields as required.
+At the minimum, fill in the **Summary** and choose `hbase` as the **Review Group**.
+If you fill in the **Bugs** field, the review board links back to the relevant JIRA.
+The more fields you fill in, the better.
+Click **Publish** to make your review request public.
+An email will be sent to everyone in the `hbase` group, to review the patch.
+
+
+
+
+
+Back in your JIRA, click , and paste in the URL of your ReviewBoard request.
+This attaches the ReviewBoard to the JIRA, for easy access.
+
+
+
+
+
+To cancel the request, click .
+
+
+
+
+
+For more information on how to use ReviewBoard, see [the ReviewBoard
+documentation](http://www.reviewboard.org/docs/manual/1.5/).
+
+### GitHub
+
+Submitting [GitHub](https://github.com/apache/hbase) pull requests is another accepted form of
+contributing patches. Refer to GitHub [documentation](https://help.github.com/) for details on
+how to create pull requests.
+
+
+ This section is incomplete and needs to be updated. Refer to
+ [HBASE-23557](https://issues.apache.org/jira/browse/HBASE-23557)
+
+
+#### GitHub Tooling
+
+**Browser bookmarks**
+
+The following is a useful JavaScript-based browser bookmark that redirects from GitHub pull
+requests to the corresponding jira work item. This redirects based on the HBase jira ID mentioned
+in the issue title for the PR. Add the following javascript snippet as a browser bookmark to the
+tool bar. Clicking on it while you are on an HBase GitHub PR page redirects you to the corresponding
+jira item.
+
+```js
+location.href =
+ "https://issues.apache.org/jira/browse/" +
+ document.getElementsByClassName("js-issue-title")[0].innerHTML.match(/HBASE-\d+/)[0];
+```
+
+### Guide for HBase Committers
+
+#### Becoming a committer
+
+Committers are responsible for reviewing and integrating code changes, testing
+and voting on release candidates, weighing in on design discussions, as well as
+other types of project contributions. The PMC votes to make a contributor a
+committer based on an assessment of their contributions to the project. It is
+expected that committers demonstrate a sustained history of high-quality
+contributions to the project and community involvement.
+
+Contributions can be made in many ways. There is no single path to becoming a
+committer, nor any expected timeline. Submitting features, improvements, and bug
+fixes is the most common avenue, but other methods are both recognized and
+encouraged (and may be even more important to the health of HBase as a project and a
+community). A non-exhaustive list of potential contributions (in no particular
+order):
+
+- [Update the documentation](/docs/contributing-to-documentation) for new
+ changes, best practices, recipes, and other improvements.
+- Keep the website up to date.
+- Perform testing and report the results. For instance, scale testing and
+ testing non-standard configurations is always appreciated.
+- Maintain the shared Jenkins testing environment and other testing
+ infrastructure.
+- [Vote on release candidates](/docs/building-and-developing/voting) after performing validation, even if non-binding.
+ A non-binding vote is a vote by a non-committer.
+- Provide input for discussion threads on the [mailing lists](/mail-lists.html) (which usually have
+  `[DISCUSS]` in the subject line).
+- Answer questions on the user or developer mailing lists and on
+  Slack.
+- Make sure the HBase community is a welcoming one and that we adhere to our
+  [Code of conduct](/coc.html). Alert the PMC if you
+  have concerns.
+- Review other people's work (both code and non-code) and provide public
+ feedback.
+- Report bugs that are found, or file new feature requests.
+- Triage issues and keep JIRA organized. This includes closing stale issues,
+ labeling new issues, updating metadata, and other tasks as needed.
+- Mentor new contributors of all sorts.
+- Give talks and write blogs about HBase. Add these to the [News](/) section
+  of the website.
+- Provide UX feedback about HBase, the web UI, the CLI, APIs, and the website.
+- Write demo applications and scripts.
+- Help attract and retain a diverse community.
+- Interact with other projects in ways that benefit HBase and those other
+ projects.
+
+Not every individual is able to do all (or even any) of the items on this list.
+If you think of other ways to contribute, go for it (and add them to the list).
+A pleasant demeanor and willingness to contribute are all you need to make a
+positive impact on the HBase project. Invitations to become a committer are the
+result of steady interaction with the community over the long term, which builds
+trust and recognition.
+
+#### New committers
+
+New committers are encouraged to first read Apache's generic committer
+documentation:
+
+- [Apache New Committer Guide](https://www.apache.org/dev/new-committers-guide.html)
+- [Apache Committer FAQ](https://www.apache.org/dev/committers.html)
+
+#### Review
+
+HBase committers should, as often as possible, attempt to review patches
+submitted by others. Ideally every submitted patch will get reviewed by a
+committer _within a few days_. If a committer reviews a patch they have not
+authored, and believe it to be of sufficient quality, then they can commit the
+patch. Otherwise the patch should be cancelled with a clear explanation for why
+it was rejected.
+
+The list of submitted patches is in the
+[HBase Review Queue](https://issues.apache.org/jira/secure/IssueNavigator.jspa?mode=hide&requestId=12312392),
+which is ordered by time of last modification. Committers should scan the list
+from top to bottom, looking for patches that they feel qualified to review and
+possibly commit. If you see a patch you think someone else is better qualified
+to review, you can mention them by username in the JIRA.
+
+For non-trivial changes, it is required that another committer review your
+patches before commit. **Self-commits of non-trivial patches are not allowed.**
+Use the **Submit Patch** button in JIRA, just like other contributors, and
+then wait for a `+1` response from another committer before committing.
+
+#### Reject
+
+Patches which do not adhere to the guidelines in
+[HowToContribute](/docs/building-and-developing) and to the
+[code review checklist](https://cwiki.apache.org/confluence/display/HADOOP2/CodeReviewChecklist)
+should be rejected. Committers should always be polite to contributors and try
+to instruct and encourage them to contribute better patches. If a committer
+wishes to improve an unacceptable patch, then it should first be rejected, and a
+new patch should be attached by the committer for further review.
+
+#### Commit
+
+Committers commit patches to the Apache HBase GIT repository.
+
+
+ Make sure your local configuration is correct, especially your identity and email. Examine the
+ output of the `$ git config --list` command and be sure it is correct. See [Set Up
+ Git](https://help.github.com/articles/set-up-git) if you need pointers.
+
+
+When you commit a patch:
+
+1. Include the Jira issue ID in the commit message along with a short description
+ of the change. Try to add something more than just the Jira title so that
+ someone looking at `git log` output doesn't have to go to Jira to discern what
+ the change is about. Be sure to get the issue ID right, because this causes
+ Jira to link to the change in Git (use the issue's "All" tab to see these
+ automatic links).
+2. Commit the patch to a new branch based off `master` or the other intended
+ branch. It's a good idea to include the JIRA ID in the name of this branch.
+ Check out the relevant target branch where you want to commit, and make sure
+ your local branch has all remote changes, by doing a `git pull --rebase` or
+ another similar command. Next, cherry-pick the change into each relevant
+ branch (such as master), and push the changes to the remote branch using
+ a command such as `git push `.
+
+
+ If you do not have all remote changes, the push will fail. If the push fails for any reason,
+ fix the problem or ask for help. Do not do a `git push --force`.
+
+
+ Before you can commit a patch, you need to determine how the patch was created.
+ The instructions and preferences around the way to create patches have changed,
+ and there will be a transition period.
+
+ **Determine How a Patch Was Created**
+ - If the first few lines of the patch look like the headers of an email, with a From, Date, and
+ Subject, it was created using `git format-patch`. This is the preferred way, because you can
+ reuse the submitter's commit message. If the commit message is not appropriate, you can still use
+ the commit, then run `git commit --amend` and reword as appropriate.
+  - If the first line of the patch looks similar to the following, it was created using `git diff` without `--no-prefix`.
+ This is acceptable too.
+ Notice the `a` and `b` in front of the file names.
+ This is the indication that the patch was not created with `--no-prefix`.
+
+ ```diff
+ diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
+ ```
+
+ - If the first line of the patch looks similar to the following (without the `a` and `b`), the
+ patch was created with `git diff --no-prefix` and you need to add `-p0` to the `git apply` command
+ below.
+
+ ```diff
+ diff --git src/main/asciidoc/_chapters/developer.adoc src/main/asciidoc/_chapters/developer.adoc
+ ```
+
+ **Example of committing a Patch**
+
+ One thing you will notice with these examples is that there are a lot of
+ `git pull` commands. The only command that actually writes anything to the
+ remote repository is `git push`, and you need to make absolutely sure you have
+ the correct versions of everything and don't have any conflicts before pushing.
+ The extra `git pull` commands are usually redundant, but better safe than sorry.
+
+  The first example shows how to apply a patch that was generated with `git format-patch` to the `master` and `branch-1` branches.
+
+ The directive to use `git format-patch` rather than `git diff`, and not to use
+ `--no-prefix`, is a new one. See the second example for how to apply a patch
+ created with `git diff`, and educate the person who created the patch.
+
+ ```bash
+ $ git checkout -b HBASE-XXXX
+ $ git am ~/Downloads/HBASE-XXXX-v2.patch --signoff # If you are committing someone else's patch.
+ $ git checkout master
+ $ git pull --rebase
+  $ git cherry-pick <sha>
+ # Resolve conflicts if necessary or ask the submitter to do it
+ $ git pull --rebase # Better safe than sorry
+ $ git push origin master
+
+ # Backport to branch-1
+ $ git checkout branch-1
+ $ git pull --rebase
+  $ git cherry-pick <sha>
+ # Resolve conflicts if necessary
+ $ git pull --rebase # Better safe than sorry
+ $ git push origin branch-1
+ $ git branch -D HBASE-XXXX
+ ```
+
+ This example shows how to commit a patch that was created using `git diff`
+ without `--no-prefix`. If the patch was created with `--no-prefix`, add `-p0` to
+ the `git apply` command.
+
+ ```bash
+ $ git apply ~/Downloads/HBASE-XXXX-v2.patch
+  $ git commit -m "HBASE-XXXX Really Good Code Fix (Joe Schmo)" --author=<contributor> -a # This and the next command are needed for patches created with 'git diff'
+ $ git commit --amend --signoff
+ $ git checkout master
+ $ git pull --rebase
+  $ git cherry-pick <sha>
+ # Resolve conflicts if necessary or ask the submitter to do it
+ $ git pull --rebase # Better safe than sorry
+ $ git push origin master
+
+ # Backport to branch-1
+ $ git checkout branch-1
+ $ git pull --rebase
+  $ git cherry-pick <sha>
+ # Resolve conflicts if necessary or ask the submitter to do it
+ $ git pull --rebase # Better safe than sorry
+ $ git push origin branch-1
+ $ git branch -D HBASE-XXXX
+ ```
+
+3. Resolve the issue as fixed, thanking the contributor.
+   Always set the "Fix Version" at this point, but only set a single fix version
+   for each branch where the change was committed: the earliest release in that
+   branch in which the change will appear.
+
+**Commit Message Format**
+
+The commit message should contain the JIRA ID and a description of what the patch does.
+The preferred commit message format is:
+
+```text
+<jira-id> <jira-title> (<contributor-name>)
+```
+
+For example:
+
+```text
+HBASE-12345 Fix All The Things (jane@example.com)
+```
+
+If the contributor used `git format-patch` to generate the patch, their commit
+message is in their patch and you can use that, but be sure the JIRA ID is at
+the front of the commit message, even if the contributor left it out.
+
+**Use GitHub's "Co-authored-by" when there are multiple authors**
+
+We've established the practice of committing to master and then cherry-picking back to branches whenever possible, unless
+
+- it's breaking compat: in which case, if it can go in minor releases, backport to branch-1 and branch-2.
+- it's a new feature: no for maintenance releases; for minor releases, discuss and arrive at consensus.
+
+There are occasions when there are multiple authors for a patch.
+For example, when there is a minor conflict we can fix it up and just proceed with the commit.
+The amending author will be different from the original committer, so you should also attribute the change to the original author by
+adding one or more `Co-authored-by` trailers to the commit's message.
+See [the GitHub documentation for "Creating a commit with multiple authors"](https://help.github.com/en/articles/creating-a-commit-with-multiple-authors/).
+
+In short, these are the steps to add Co-authors that will be tracked by GitHub:
+
+1. Collect the name and email address for each co-author.
+2. Commit the change, but after your commit description, instead of a closing quotation mark, add two empty lines. (Do not close the commit message with a quotation mark yet.)
+3. On the next line of the commit message, type `Co-authored-by: name <name@example.com>`, one line per co-author. After the co-author information, add the closing quotation mark.
+
+Here is the example from the GitHub page, using 2 Co-authors:
+
+```bash
+$ git commit -m "Refactor usability tests.
+>
+>
+Co-authored-by: name <name@example.com>
+Co-authored-by: another-name <another-name@example.com>"
+```
+
+Note: `Amending-Author: Author ` was used prior to this
+[DISCUSSION](https://lists.apache.org/thread.html/f00b5f9b65570e777dbb31c37d7b0ffc55c5fc567aefdb456608a042@%3Cdev.hbase.apache.org%3E).
+
+**Close related GitHub PRs**
+
+As a project we work to ensure there's a JIRA associated with each change, but we don't mandate any particular tool be used for reviews. Due to implementation details of the ASF's integration between hosted git repositories and GitHub, the PMC has no ability to directly close PRs on our GitHub repo. In the event that a contributor makes a Pull Request on GitHub, either because the contributor finds that easier than attaching a patch to JIRA or because a reviewer prefers that UI for examining changes, it's important to make note of the PR in the commit that goes to the master branch so that PRs are kept up to date.
+
+To read more about the details of what kinds of commit messages will work with the GitHub "close via keyword in commit" mechanism see [the GitHub documentation for "Closing issues using keywords"](https://help.github.com/articles/closing-issues-using-keywords/). In summary, you should include a line with the phrase "closes #XXX", where the XXX is the pull request id. The pull request id is usually given in the GitHub UI in grey at the end of the subject heading.
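+
+For instance, a commit message that carries both the JIRA ID and the closing keyword might look like
+the following (the JIRA ID and PR number are placeholders):
+
+```bash
+# The blank line separates the subject from the body; "closes #123" lets GitHub
+# close the corresponding pull request once this commit reaches the default branch.
+git commit -m "HBASE-12345 Fix All The Things (Jane Doe)
+
+closes #123"
+```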
+
+**Committers are responsible for making sure commits do not break the build or tests**
+
+If a committer commits a patch, it is their responsibility to make sure it passes the test suite.
+It is helpful if contributors keep an eye out that their patch does not break the hbase build and/or tests, but ultimately, a contributor cannot be expected to be aware of all the particular vagaries and interconnections that occur in a project like HBase.
+A committer should.
+
+**Patching Etiquette**
+
+In the thread [HBase, mail # dev - ANNOUNCEMENT: Git Migration In Progress (WAS =>
+Re: Git Migration)](https://lists.apache.org/thread.html/186fcd5eb71973a7b282ecdba41606d3d221efd505d533bb729e1fad%401400648690%40%3Cdev.hbase.apache.org%3E), the following patch flow was agreed on:
+
+1. Develop and commit the patch against master first.
+2. Try to cherry-pick the patch when backporting if possible.
+3. If this does not work, manually commit the patch to the branch.
+
+**Merge Commits**
+
+Avoid merge commits, as they create problems in the git history.
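+
+For example, when bringing your local copy of a branch up to date before pushing, prefer a rebase
+(as in the commit walk-throughs above) so no merge commit is introduced:
+
+```bash
+# Update the local branch without creating a merge commit
+git checkout master
+git pull --rebase origin master
+```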
+
+**Committing Documentation**
+
+See [appendix contributing to documentation](/docs/contributing-to-documentation).
+
+**How to re-trigger github Pull Request checks/re-build**
+
+A Pull Request (PR) submission triggers the hbase yetus checks. The checks make
+sure the patch doesn't break the build or introduce test failures. The checks take
+around four hours to run (They are the same set run when you submit a patch via
+HBASE JIRA). When finished, they add a report to the PR as a comment. If there is a problem
+with the patch (a failed compile, a checkstyle violation, or an added findbugs warning),
+the original author makes fixes and pushes a new patch. This re-runs the checks
+to produce a new report.
+
+Sometimes though, the patch is good but a flaky, unrelated test causes the report to vote -1
+on the patch. In this case, **committers** can re-trigger the check run by doing a force push of the
+exact same patch. Or, click on the `Console output` link which shows toward the end
+of the report (For example `https://builds.apache.org/job/HBase-PreCommit-GitHub-PR/job/PR-289/1/console`).
+This will take you to `builds.apache.org`, to the build run that failed. See the
+"breadcrumbs" along the top (where breadcrumbs is the listing of the directories that
+gets us to this particular build page). It'll look something like
+`Jenkins > HBase-PreCommit-GitHub-PR > PR-289 > #1`. Click on the
+PR number — i.e. PR-289 in our example — and then, when you've arrived at the PR page,
+find the 'Build with Parameters' menu-item (along top left-hand menu). Click here and
+then `Build` leaving the JIRA_ISSUE_KEY empty. This will re-run your checks.
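+
+As a sketch of the force-push route (the branch name below is a placeholder for your own PR branch;
+the "no force push" rule above applies to the Apache branches, not to your personal PR branch):
+
+```bash
+# Re-trigger the precommit checks by force-pushing the exact same patch
+git commit --amend --no-edit        # same content, new commit hash
+git push --force origin HBASE-XXXX  # your PR branch
+```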
+
+### Dialog
+
+Committers should hang out in the #hbase room on irc.freenode.net for real-time discussions.
+However any substantive discussion (as with any off-list project-related discussion) should be re-iterated in Jira or on the developer list.
+
+### Do not edit JIRA comments
+
+Misspellings and/or bad grammar are preferable to the disruption of a JIRA comment edit.
+
+## The hbase-thirdparty dependency and shading/relocation
+
+A new project was created for the release of hbase-2.0.0. It was called
+`hbase-thirdparty`. This project exists only to provide the main hbase
+project with relocated — or shaded — versions of popular thirdparty
+libraries such as guava, netty, and protobuf. The mainline HBase project
+relies on the relocated versions of these libraries obtained from hbase-thirdparty
+rather than on finding these classes in their usual locations. We do this so
+we can specify whatever version we wish. If we don't relocate, we must
+harmonize our version to match that which hadoop, spark, and other projects use.
+
+For developers, this means you need to be careful referring to classes from
+netty, guava, protobuf, gson, etc. (see the hbase-thirdparty pom.xml for what
+it provides). Devs must refer to the hbase-thirdparty provided classes. In
+practice, this is usually not an issue (though it can be a bit of a pain). You
+will have to hunt for the relocated version of your particular class. You'll
+find it by prepending the general relocation prefix of `org.apache.hbase.thirdparty.`.
+For example if you are looking for `com.google.protobuf.Message`, the relocated
+version used by HBase internals can be found at
+`org.apache.hbase.thirdparty.com.google.protobuf.Message`.
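+
+A hypothetical spot-check from an hbase checkout (the module path is illustrative) that looks for
+accidental use of the non-relocated protobuf classes in main source:
+
+```bash
+# Any matches should normally be switched to the org.apache.hbase.thirdparty prefix
+# (Coprocessor Endpoint code is the notable exception, as discussed below).
+grep -rn "import com.google.protobuf" hbase-server/src/main/java \
+  || echo "OK: only relocated protobuf imports found"
+```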
+
+For a few thirdparty libs, like protobuf (see the protobuf chapter in this book
+for the why), your IDE may give you both options — the `com.google.protobuf.*`
+and the `org.apache.hbase.thirdparty.com.google.protobuf.*` — because both
+classes are on your CLASSPATH. Unless you are doing the particular juggling
+required in Coprocessor Endpoint development (again see above cited protobuf
+chapter), you'll want to use the shaded version, always.
+
+The `hbase-thirdparty` project has a groupId of `org.apache.hbase.thirdparty`.
+As of this writing, it provides three jars: one for netty with an artifactId of
+`hbase-thirdparty-netty`, one for protobuf at `hbase-thirdparty-protobuf`, and then
+a jar for all else (gson, guava) at `hbase-thirdparty-miscellaneous`.
+
+The hbase-thirdparty artifacts are a product produced by the Apache HBase
+project under the aegis of the HBase Project Management Committee. Releases
+are done via the usual voting process on the hbase dev mailing list. If you find an issue
+in hbase-thirdparty, use the hbase JIRA and mailing lists to post notice.
+
+## Development of HBase-related Maven archetypes
+
+The development of HBase-related Maven archetypes was begun with
+[HBASE-14876](https://issues.apache.org/jira/browse/HBASE-14876).
+For an overview of the hbase-archetypes infrastructure and instructions
+for developing new HBase-related Maven archetypes, please see
+`hbase/hbase-archetypes/README.md`.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/generating-documentation.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/generating-documentation.mdx
new file mode 100644
index 000000000000..0522feeb0341
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/generating-documentation.mdx
@@ -0,0 +1,9 @@
+---
+title: "Generating the HBase Reference Guide"
+description: "Building HBase documentation from MDX markup."
+---
+
+The manual is marked up using [MDX](https://mdxjs.com/) (just extended markdown).
+The markdown is then rendered into HTML using [Fumadocs](https://fumadocs.dev/).
+To build, run `mvn site` from the root or the `hbase-website` directory.
+See [appendix contributing to documentation](/docs/contributing-to-documentation) for more information on building the documentation.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/getting-involved.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/getting-involved.mdx
new file mode 100644
index 000000000000..c3078222a2c0
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/getting-involved.mdx
@@ -0,0 +1,124 @@
+---
+title: "Getting Involved"
+description: "How to contribute to Apache HBase including mailing lists, Slack, IRC, JIRA, and guidelines for reporting effective issues."
+---
+
+Apache HBase gets better only when people contribute! If you are looking to contribute to Apache HBase, look for [issues in JIRA tagged with the label 'beginner']().
+These are issues HBase contributors have deemed worthy but not of immediate priority, and they are a good way to ramp up on HBase internals.
+See [What label
+is used for issues that are good on ramps for new contributors?](https://lists.apache.org/thread.html/b122265f4e4054cf08f8cd38609fb06af72f398c44f9086b05ef4e21%401407246237%40%3Cdev.hbase.apache.org%3E) from the dev mailing list for background.
+
+Before you get started submitting code to HBase, please refer to [Developer Guidelines](/docs/building-and-developing/developer-guidelines).
+
+As Apache HBase is an Apache Software Foundation project, see [The Apache Software Foundation](/docs/asf) for more information about how the ASF functions.
+
+## Mailing Lists [#building-and-developing-getting-involved-mailing-lists]
+
+Sign up for the dev-list and the user-list.
+See the [mailing lists](https://hbase.apache.org/mailing-lists.html) page.
+Posing questions - and helping to answer other people's questions - is encouraged! There are varying levels of experience on both lists so patience and politeness are encouraged (and please stay on topic.)
+
+## Slack [#building-and-developing-getting-involved-slack]
+
+The Apache HBase project uses the #hbase channel on the official
+[ASF Slack Workspace](https://the-asf.slack.com/) for real-time questions and discussion.
+Committers of any Apache project can join the channel directly; others should mail
+dev@hbase.apache.org to request an invite.
+
+## Internet Relay Chat (IRC)
+
+(NOTE: Our IRC channel seems to have been deprecated in favor of the above Slack channel)
+
+For real-time questions and discussions, use the `#hbase` IRC channel on the [FreeNode](https://freenode.net/) IRC network.
+FreeNode offers a web-based client, but most people prefer a native client, and several clients are available for each operating system.
+
+## Jira [#building-and-developing-getting-involved-jira]
+
+Check for existing issues in [Jira](https://issues.apache.org/jira/projects/HBASE/issues).
+If it's a new feature request, an enhancement, or a bug, file a ticket.
+
+We track multiple types of work in JIRA:
+
+- Bug: Something is broken in HBase itself.
+- Test: A test is needed, or a test is broken.
+- New feature: You have an idea for new functionality. It's often best to bring
+ these up on the mailing lists first, and then write up a design specification
+ that you add to the feature request JIRA.
+- Improvement: A feature exists, but could be tweaked or augmented. It's often
+ best to bring these up on the mailing lists first and have a discussion, then
+ summarize or link to the discussion if others seem interested in the
+ improvement.
+- Wish: This is like a new feature, but for something you may not have the
+ background to flesh out yourself.
+
+Bugs and tests have the highest priority and should be actionable.
+
+### Guidelines for reporting effective issues
+
+- _Search for duplicates_: Your issue may have already been reported. Have a
+  look, realizing that someone else might have worded the summary differently.
+
+  Also search the mailing lists, which may have information about your problem
+  and how to work around it. Don't file an issue for something that has already
+  been discussed and resolved on a mailing list, unless you strongly disagree
+  with the resolution _and_ are willing to help take the issue forward.
+- _Discuss in public_: Use the mailing lists to discuss what you've discovered
+  and see if there is something you've missed. Avoid using back channels, so
+  that you benefit from the experience and expertise of the project as a whole.
+- _Don't file on behalf of others_: You might not have all the context, and you
+  don't have as much motivation to see it through as the person who is actually
+  experiencing the bug. It's more helpful in the long term to encourage others
+  to file their own issues. Point them to this material and offer to help out
+  the first time or two.
+- _Write a good summary_: A good summary includes information about the problem,
+  the impact on the user or developer, and the area of the code.
+  - Good: `Address new license dependencies from hadoop3-alpha4`
+  - Room for improvement: `Canary is broken`
+
+  If you write a bad title, someone else will rewrite it for you. This is time
+  they could have spent working on the issue instead.
+- _Give context in the description_: It can be good to think of this in multiple
+  parts:
+  - What happens or doesn't happen?
+  - How does it impact you?
+  - How can someone else reproduce it?
+  - What would "fixed" look like?
+
+  You don't need to know the answers for all of these, but give as much
+  information as you can. If you can provide technical information, such as a
+  Git commit SHA that you think might have caused the issue or a build failure
+  on builds.apache.org where you think the issue first showed up, share that
+  info.
+- _Fill in all relevant fields_: These fields help us filter, categorize, and
+  find things.
+- _One bug, one issue, one patch_: To help with back-porting, don't split issues
+  or fixes among multiple bugs.
+- _Add value if you can_: Filing issues is great, even if you don't know how to
+  fix them. But providing as much information as possible, being willing to
+  triage and answer questions, and being willing to test potential fixes is even
+  better! We want to fix your issue as quickly as you want it to be fixed.
+- _Don't be upset if we don't fix it_: Time and resources are finite. In some
+  cases, we may not be able to (or might choose not to) fix an issue, especially
+  if it is an edge case or there is a workaround. Even if it doesn't get fixed,
+  the JIRA is a public record of it, and will help others out if they run into
+  a similar issue in the future.
+
+### Working on an issue
+
+To check for existing issues which you can tackle as a beginner, search for [issues in JIRA tagged with the label 'beginner']().
+
+JIRA Priorities:
+
+- **Blocker**: Should only be used if the issue WILL cause data loss or cluster instability reliably.
+- **Critical**: The issue described can cause data loss or cluster instability in some cases.
+- **Major**: Important but not tragic issues, like updates to the client API that will add a lot of much-needed functionality or significant bugs that need to be fixed but that don't cause data loss.
+- **Minor**: Useful enhancements and annoying but not damaging bugs.
+- **Trivial**: Useful enhancements but generally cosmetic.
+
+Code Blocks in Jira Comments:
+
+A commonly used macro in Jira is `{code}`. Everything inside the tags is preformatted, as in this example.
+
+```text
+{code}
+code snippet
+{code}
+```
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/ides.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/ides.mdx
new file mode 100644
index 000000000000..852ee0ee462e
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/ides.mdx
@@ -0,0 +1,126 @@
+---
+title: "IDEs"
+description: "Setting up Eclipse and IntelliJ IDEA for HBase development including formatters, plugins, and Maven integration."
+---
+
+## Eclipse
+
+### Code Formatting
+
+Under the _dev-support/_ folder, you will find _hbase_eclipse_formatter.xml_.
+We encourage you to have this formatter in place in Eclipse when editing HBase code.
+
+Go to `Preferences->Java->Code Style->Formatter->Import` to load the xml file.
+Go to `Preferences->Java->Editor->Save Actions`, and make sure 'Format source code' and 'Format
+edited lines' are selected.
+
+In addition to the automatic formatting, make sure you follow the style guidelines explained in
+[Code Formatting Conventions](/docs/building-and-developing/developer-guidelines#code-formatting-conventions).
+
+### Eclipse Git Plugin
+
+If you cloned the project via git, download and install the Git plugin (EGit). Attach to your local git repo (via the Git Repositories window) and you'll be able to see file revision history, generate patches, etc.
+
+### HBase Project Setup in Eclipse using `m2eclipse`
+
+The easiest way is to use the `m2eclipse` plugin for Eclipse.
+Eclipse Indigo or newer includes `m2eclipse`, or you can download it from http://www.eclipse.org/m2e/. It provides Maven integration for Eclipse, and even lets you use the direct Maven commands from within Eclipse to compile and test your project.
+
+To import the project, use Eclipse's Import dialog (from the File menu) and select the HBase root directory. `m2eclipse` locates all the hbase modules for you.
+
+If you install `m2eclipse` and import HBase in your workspace, do the following to fix your eclipse Build Path.
+
+- Remove _target_ folder
+- Add _target/generated-sources/java_ folder.
+- Remove from your Build Path the exclusions on the _src/main/resources_ and _src/test/resources_ to avoid error messages in the console, such as the following:
+ ```text
+ Failed to execute goal
+ org.apache.maven.plugins:maven-antrun-plugin:1.6:run (default) on project hbase:
+ 'An Ant BuildException has occurred: Replace: source file .../target/classes/hbase-default.xml
+ doesn't exist
+ ```
+ This will also reduce the eclipse build cycles and make your life easier when developing.
+
+### HBase Project Setup in Eclipse Using the Command Line
+
+Instead of using `m2eclipse`, you can generate the Eclipse files from the command line.
+
+- First, run the following command, which builds HBase.
+ You only need to do this once.
+
+ ```bash
+ mvn clean install -DskipTests
+ ```
+
+- Close Eclipse, and execute the following command from the terminal, in your local HBase project directory, to generate new _.project_ and _.classpath_ files.
+
+ ```bash
+ mvn eclipse:eclipse
+ ```
+
+- Reopen Eclipse and import the _.project_ file in the HBase directory to a workspace.
+
+### Maven Classpath Variable
+
+The `$M2_REPO` classpath variable needs to be set up for the project.
+This needs to be set to your local Maven repository, which is usually _~/.m2/repository_.
+
+If this classpath variable is not configured, you will see compile errors in Eclipse like this:
+
+```text
+Description Resource Path Location Type
+The project cannot be built until build path errors are resolved hbase Unknown Java Problem
+Unbound classpath variable: 'M2_REPO/asm/asm/3.1/asm-3.1.jar' in project 'hbase' hbase Build path Build Path Problem
+Unbound classpath variable: 'M2_REPO/com/google/guava/guava/r09/guava-r09.jar' in project 'hbase' hbase Build path Build Path Problem
+Unbound classpath variable: 'M2_REPO/com/google/protobuf/protobuf-java/2.3.0/protobuf-java-2.3.0.jar' in project 'hbase' hbase Build path Build Path Problem Unbound classpath variable:
+```
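+
+One way to define the variable, assuming you generate the Eclipse metadata with the
+maven-eclipse-plugin as described above (the goal below belongs to that plugin; the workspace path
+is a placeholder), is:
+
+```bash
+# Adds the M2_REPO classpath variable to the given Eclipse workspace
+mvn -Declipse.workspace=/path/to/your/eclipse/workspace eclipse:configure-workspace
+```
+
+Alternatively, set `M2_REPO` by hand under `Preferences->Java->Build Path->Classpath Variables`.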
+
+### Eclipse Known Issues
+
+Eclipse will currently complain about _Bytes.java_.
+It is not possible to turn these errors off.
+
+```text
+Description Resource Path Location Type
+Access restriction: The method arrayBaseOffset(Class) from the type Unsafe is not accessible due to restriction on required library /System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/classes.jar Bytes.java /hbase/src/main/java/org/apache/hadoop/hbase/util line 1061 Java Problem
+Access restriction: The method arrayIndexScale(Class) from the type Unsafe is not accessible due to restriction on required library /System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/classes.jar Bytes.java /hbase/src/main/java/org/apache/hadoop/hbase/util line 1064 Java Problem
+Access restriction: The method getLong(Object, long) from the type Unsafe is not accessible due to restriction on required library /System/Library/Java/JavaVirtualMachines/1.6.0.jdk/Contents/Classes/classes.jar Bytes.java /hbase/src/main/java/org/apache/hadoop/hbase/util line 1111 Java Problem
+```
+
+### Eclipse - More Information
+
+For additional information on setting up Eclipse for HBase development on Windows, see [Michael Morello's blog](http://michaelmorello.blogspot.com/2011/09/hbase-subversion-eclipse-windows.html) on the topic.
+
+## IntelliJ IDEA
+
+A functional development environment can be set up around an IntelliJ IDEA installation that has the
+plugins necessary for building Java projects with Maven.
+
+- Use either File > New > "Project from Existing Sources..." or "Project From Version Control..."
+- Depending on your version of IntelliJ, you may need to choose Maven as the "project" or "model"
+ type.
+
+The following plugins are recommended:
+
+- Maven, bundled. This allows IntelliJ to resolve dependencies and recognize the project structure.
+- EditorConfig, bundled. This will apply project whitespace settings found in the
+ `.editorconfig` file available on branches with
+ [HBASE-23234](https://issues.apache.org/jira/browse/HBASE-23234) or later.
+- [Checkstyle-IDEA](https://plugins.jetbrains.com/plugin/1065-checkstyle-idea/). Configure this
+ against the configuration file found under `hbase-checkstyle/src/main/resources/hbase/checkstyle.xml`
+ (If the Intellij checkstyle plugin complains parsing the volunteered hbase `checkstyle.xml`, make
+ sure the plugin's `version` popup menu matches the hbase checkstyle version. Find the current
+ checkstyle version as a property in `pom.xml`.
+ This plugin will highlight style errors in the IDE, so you can fix them before they get flagged during the
+ pre-commit process.
+- [Protobuf Support](https://plugins.jetbrains.com/plugin/8277-protobuf-support/). HBase uses
+ [Protocol Buffers](https://developers.google.com/protocol-buffers/) in a number of places where
+ serialization is required. This plugin is helpful when editing these object definitions.
+- [MDX](https://plugins.jetbrains.com/plugin/14944-mdx). HBase uses
+  [MDX](https://mdxjs.com) (just extended markdown) for building its project documentation. This plugin is helpful
+ when editing this book.
+
+## Other IDEs
+
+If you have another environment in which you'd like to develop on HBase, please consider
+documenting your setup process here.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/index.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/index.mdx
new file mode 100644
index 000000000000..704d236d30dc
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/index.mdx
@@ -0,0 +1,7 @@
+---
+title: "Building and Developing Apache HBase"
+description: "Comprehensive guide for building, testing, releasing, and contributing to Apache HBase."
+---
+
+This chapter contains information and guidelines for building and releasing HBase code and documentation.
+Being familiar with these guidelines will help the HBase committers to use your contributions more easily.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/meta.json b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/meta.json
new file mode 100644
index 000000000000..23f427b6ba17
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/meta.json
@@ -0,0 +1,16 @@
+{
+ "title": "Building and Developing Apache HBase",
+ "pages": [
+ "getting-involved",
+ "repositories",
+ "ides",
+ "building",
+ "releasing",
+ "voting",
+ "announcing",
+ "generating-documentation",
+ "updating-landing",
+ "tests",
+ "developer-guidelines"
+ ]
+}
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/releasing.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/releasing.mdx
new file mode 100644
index 000000000000..b0cd7a1e97d2
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/releasing.mdx
@@ -0,0 +1,401 @@
+---
+title: "Releasing Apache HBase"
+description: "Complete guide to creating HBase release candidates including building, signing, staging artifacts, and publishing releases."
+---
+
+
+  See old refguides for how to build HBase 1.x. The instructions below are for building hbase2.
+
+
+## Making a Release Candidate
+
+Only committers can make releases of hbase artifacts.
+
+**Before You Begin**
+
+Check to be sure recent builds have been passing for the branch from where you
+are going to take your release. You should also have tried recent branch tips
+out on a cluster under load, perhaps by running the `hbase-it` integration test
+suite for a few hours to 'burn in' the near-candidate bits.
+
+You will need a published signing key added to the hbase
+[KEYS](https://dist.apache.org/repos/dist/release/hbase/KEYS) file.
+(For how to add a KEY, see _Step 1._ in [How To Release](https://cwiki.apache.org/confluence/display/HADOOP2/HowToRelease),
+the Hadoop version of this document).
+
+Next make sure JIRA is properly primed, that all issues targeted against
+the prospective release have been resolved and are present in git on the
+particular branch. If there are any outstanding issues, move them out of the release by
+adjusting their fix version to remove this pending release as a target.
+Any JIRA with a fix version that matches the release candidate
+target release will be included in the generated _CHANGES.md/RELEASENOTES.md_
+files that ship with the release so make sure JIRA is correct before you begin.
+
+After doing the above, you can move to the manufacture of an RC.
+
+Building an RC is involved, so we've scripted it. The script builds in a Docker
+container to ensure we have a consistent build environment. It will ask you
+for passwords for apache and for your gpg signing key so it can sign and commit
+on your behalf. The passwords are passed to gpg-agent in the container and
+purged along with the container when the build is done.
+
+The script will:
+
+- Sets the version to the release version
+- Updates RELEASENOTES.md and CHANGES.md
+- Tags the RC
+- Sets the version to the next SNAPSHOT version
+- Builds, signs, and hashes all artifacts
+- Generates the api compatibility report
+- Pushes the release tgzs to the dev dir of the apache dist area
+- Pushes artifacts to repository.apache.org staging
+- Creates a vote email template
+
+The _dev-support/create-release/do-release-docker.sh_ Release Candidate (RC)
+Generating script is maintained in the master branch but can generate RCs
+for any 2.x+ branch (The script does not work against branch-1). Check out
+and update the master branch when making RCs. See
+_dev-support/create-release/README.txt_ for how to configure your
+environment and run the script.
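+
+A typical invocation looks something like the following (a sketch only; the supported flags can
+change between branches, so check _dev-support/create-release/README.txt_ first):
+
+```bash
+# Run from an up-to-date master branch checkout; -d names a working directory
+# where the script writes its output.
+./dev-support/create-release/do-release-docker.sh -d /path/to/rc-work-dir
+```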
+
+
+  _dev-support/create-release/do-release-docker.sh_ supersedes the previous _dev-support/make_rc.sh_
+  script. It is more comprehensive, automating all steps of building an RC rather than just a portion.
+
+
+### Release Candidate Procedure
+
+Here we outline the steps involved in generating a Release Candidate; these are the steps
+automated by the _dev-support/create-release/do-release-docker.sh_ script
+described in the previous section. Running these steps manually tends to
+be error-prone, so it is not recommended. The below is informational only.
+
+The process below makes use of various tools, mainly _git_ and _maven_.
+
+
+You may run into OutOfMemoryErrors building, particularly building the site and
+documentation. Up the heap for Maven by setting the `MAVEN_OPTS` variable.
+You can prefix the variable to the Maven command, as in the following example:
+
+```bash
+MAVEN_OPTS="-Xmx4g -XX:MaxPermSize=256m" mvn package
+```
+
+You could also set this in an environment variable or alias in your shell.
+
+
+
+
+
+
+#### Example _~/.m2/settings.xml_ File
+
+Publishing to maven requires that you sign the artifacts you want to upload.
+For the build to sign them for you, you need a properly configured _settings.xml_
+in your local repository under _.m2_, such as the following.
+
+```xml
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+                      http://maven.apache.org/xsd/settings-1.0.0.xsd">
+  <servers>
+    <!-- Used when publishing snapshots -->
+    <server>
+      <id>apache.snapshots.https</id>
+      <username>YOUR_APACHE_ID</username>
+      <password>YOUR_APACHE_PASSWORD</password>
+    </server>
+    <!-- Used when staging a release -->
+    <server>
+      <id>apache.releases.https</id>
+      <username>YOUR_APACHE_ID</username>
+      <password>YOUR_APACHE_PASSWORD</password>
+    </server>
+  </servers>
+  <profiles>
+    <profile>
+      <id>apache-release</id>
+      <properties>
+        <!-- gpg key and passphrase used for signing the artifacts -->
+        <gpg.keyname>YOUR_KEYNAME</gpg.keyname>
+        <gpg.passphrase>YOUR_KEY_PASSWORD</gpg.passphrase>
+      </properties>
+    </profile>
+  </profiles>
+</settings>
+```
+
+
+
+
+#### Update the _CHANGES.md_ and _RELEASENOTES.md_ files and the POM files.
+
+Update _CHANGES.md_ with the changes since the last release. Be careful with where you put
+headings and license. Respect the instructions and warnings you find in the current
+_CHANGES.md_ and _RELEASENOTES.md_ since these two files are processed by tooling that is
+looking for particular string sequences. See [HBASE-21399](https://issues.apache.org/jira/browse/HBASE-21399)
+for a description of how to use yetus to generate additions to
+_CHANGES.md_ and _RELEASENOTES.md_ (RECOMMENDED!). When adding JIRA fixes, make sure the
+URL to the JIRA points to the proper location which lists fixes for this release.
+
+Next, adjust the version in all the POM files appropriately.
+If you are making a release candidate, you must remove the `-SNAPSHOT` label from all versions
+in all pom.xml files.
+If you are running this recipe to publish a snapshot, you must keep the `-SNAPSHOT` suffix on the hbase version.
+The [Versions Maven Plugin](http://www.mojohaus.org/versions-maven-plugin/) can be of use here.
+To set a version in all the many poms of the hbase multi-module project, use a command like the following:
+
+```bash
+$ mvn clean org.codehaus.mojo:versions-maven-plugin:2.5:set -DnewVersion=2.1.0-SNAPSHOT
+```
+
+Make sure all versions in poms are changed! Checkin the _CHANGES.md_, _RELEASENOTES.md_, and
+any maven version changes.
+
+
+
+
+
+#### Update the documentation.
+
+Update the documentation under _hbase-website/app/pages/\_docs/docs/\_mdx/(multi-page)_.
+This usually involves copying the latest from master branch and making version-particular
+adjustments to suit this release candidate version. Commit your changes.
+
+
+
+
+#### Clean the checkout dir
+
+```bash
+$ mvn clean
+$ git clean -f -x -d
+```
+
+
+
+
+#### Run Apache-Rat
+
+Check licenses are good
+
+```bash
+$ mvn apache-rat:check
+```
+
+If the above fails, check the rat log.
+
+```bash
+$ grep 'Rat check' patchprocess/mvn_apache_rat.log
+```
+
+
+
+
+#### Create a release tag.
+
+Presuming you have run basic tests, the rat check passes, and all is
+looking good, now is the time to tag the release candidate (you can
+always remove the tag if you need to redo). To tag, do
+what follows, substituting in the version appropriate to your build.
+All tags should be signed tags; i.e. pass the _-s_ option (See
+[Signing Your Work](https://git-scm.com/book/id/v2/Git-Tools-Signing-Your-Work)
+for how to set up your git environment for signing).
+
+```bash
+$ git tag -s 2.0.0-alpha4-RC0 -m "Tagging the 2.0.0-alpha4 first Release Candidate (Candidates start at zero)"
+```
+
+Or, if you are making a release, tags should have a _rel/_ prefix to ensure
+they are preserved in the Apache repo as in:
+
+```bash
+$ git tag -s rel/2.0.0-alpha4 -m "Tagging the 2.0.0-alpha4 Release"
+```
+
+Push the (specific) tag (only) so others have access.
+
+```bash
+$ git push origin 2.0.0-alpha4-RC0
+```
+
+For how to delete tags, see
+[How to Delete a Tag](http://www.manikrathee.com/how-to-delete-a-tag-in-git.html). Covers
+deleting tags that have not yet been pushed to the remote Apache
+repo as well as deleting tags already pushed to Apache.
+
+
+
+
+#### Build the source tarball.
+
+Now, build the source tarball. Let's presume we are building the source
+tarball for the tag _2.0.0-alpha4-RC0_ into _/tmp/hbase-2.0.0-alpha4-RC0/_
+(This step requires that the mvn and git clean steps described above have just been done).
+
+```bash
+$ git archive --format=tar.gz --output="/tmp/hbase-2.0.0-alpha4-RC0/hbase-2.0.0-alpha4-src.tar.gz" --prefix="hbase-2.0.0-alpha4/" $git_tag
+```
+
+Above we generate the hbase-2.0.0-alpha4-src.tar.gz tarball into the
+_/tmp/hbase-2.0.0-alpha4-RC0_ build output directory (We don't want the _RC0_ in the name or prefix.
+These bits are currently a release candidate but if the VOTE passes, they will become the release so we do not taint
+the artifact names with _RCX_).
+
+
+
+
+#### Build the binary tarball.
+
+Next, build the binary tarball. Add the `-Prelease` profile when building.
+It runs the license apache-rat check among other rules that help ensure
+all is wholesome. Do it in two steps.
+
+First install into the local repository
+
+```bash
+$ mvn clean install -DskipTests -Prelease
+```
+
+Next, generate documentation and assemble the tarball. Be warned,
+this next step can take a good while, a couple of hours generating site
+documentation.
+
+```bash
+$ mvn install -DskipTests site assembly:single -Prelease
+```
+
+Otherwise, the build complains that hbase modules are not in the maven repository
+when you try to do it all in one step, especially on a fresh repository.
+It seems that you need the install goal in both steps.
+
+Extract the generated tarball — you'll find it under
+_hbase-assembly/target_ and check it out.
+Look at the documentation, see if it runs, etc.
+If good, copy the tarball beside the source tarball in the
+build output directory.
+
+
+
+
+#### Deploy to the Maven Repository.
+
+Next, deploy HBase to the Apache Maven repository. Add the
+`apache-release` profile when running the `mvn deploy` command.
+This profile comes from the Apache parent pom referenced by our pom files.
+It does signing of your artifacts published to Maven, as long as the
+_settings.xml_ is configured correctly, as described in [Example ~/.m2/settings.xml File](/docs/building-and-developing/releasing#example-m2settingsxml-file).
+This step depends on the local repository having been populated
+by the just-previous bin tarball build.
+
+```bash
+$ mvn deploy -DskipTests -Papache-release -Prelease
+```
+
+This command copies all artifacts up to a temporary staging Apache mvn repository in an 'open' state.
+More work needs to be done on these maven artifacts to make them generally available.
+
+We do not release HBase tarball to the Apache Maven repository. To avoid deploying the tarball, do not
+include the `assembly:single` goal in your `mvn deploy` command. Check the deployed artifacts as described in the next section.
+
+
+ If you ran the old _dev-support/make_rc.sh_ script, this is as far as it takes you. To finish the
+ release, take up the script from here on out.
+
+
+
+
+#### Make the Release Candidate available.
+
+The artifacts are in the maven repository in the staging area in the 'open' state.
+While in this 'open' state you can check out what you've published to make sure all is good.
+To do this, log in to Apache's Nexus at [repository.apache.org](https://repository.apache.org) using your Apache ID.
+Find your artifacts in the staging repository. Click on 'Staging Repositories' and look for a new one ending in "hbase" with a status of 'Open', select it.
+Use the tree view to expand the list of repository contents and inspect if the artifacts you expect are present. Check the POMs.
+As long as the staging repo is open you can re-upload if something is missing or built incorrectly.
+
+If something is seriously wrong and you would like to back out the upload, you can use the 'Drop' button to drop and delete the staging repository.
+Sometimes the upload fails in the middle. This is another reason you might have to 'Drop' the upload from the staging repository.
+
+If it checks out, close the repo using the 'Close' button. The repository must be closed before a public URL to it becomes available. It may take a few minutes for the repository to close. Once complete you'll see a public URL to the repository in the Nexus UI. You may also receive an email with the URL. Provide the URL to the temporary staging repository in the email that announces the release candidate.
+(Folks will need to add this repo URL to their local poms or to their local _settings.xml_ file to pull the published release candidate artifacts.)
+
+When the release vote concludes successfully, return here and click the 'Release' button to release the artifacts to central. The release process will automatically drop and delete the staging repository.
+
+
+ See the [hbase-downstreamer](https://github.com/saintstack/hbase-downstreamer) test for a simple
+  example of a project that is downstream of HBase and depends on it. Check it out and run its simple
+ test to make sure maven artifacts are properly deployed to the maven repository. Be sure to edit
+ the pom to point to the proper staging repository. Make sure you are pulling from the repository
+ when tests run and that you are not getting from your local repository, by either passing the `-U`
+  flag or deleting your local repo content and checking that maven is pulling from the remote staging
+  repository.
+
+
+See [Publishing Maven Artifacts](https://www.apache.org/dev/publishing-maven-artifacts.html) for some pointers on this maven staging process.
+
+If the HBase version ends in `-SNAPSHOT`, the artifacts go elsewhere.
+They are put into the Apache snapshots repository directly and are immediately available.
+If you are making a SNAPSHOT release, this is what you want to happen.
+
+At this stage, you have two tarballs in your 'build output directory' and a set of artifacts
+in a staging area of the maven repository, in the 'closed' state.
+Next, sign, fingerprint, and then 'stage' your release candidate build output directory via svnpubsub by committing
+your directory to [The dev distribution directory](https://dist.apache.org/repos/dist/dev/hbase/)
+(See comments on [HBASE-10554 Please delete old releases from mirroring system](https://issues.apache.org/jira/browse/HBASE-10554)
+but in essence it is an svn checkout of [dev/hbase](https://dist.apache.org/repos/dist/dev/hbase) — releases are at
+[release/hbase](https://dist.apache.org/repos/dist/release/hbase)). In the _version directory_ run the following commands:
+
+```bash
+$ for i in *.tar.gz; do echo $i; gpg --print-md MD5 $i > $i.md5 ; done
+$ for i in *.tar.gz; do echo $i; gpg --print-md SHA512 $i > $i.sha ; done
+$ for i in *.tar.gz; do echo $i; gpg --armor --output $i.asc --detach-sig $i ; done
+$ cd ..
+# Presuming our 'build output directory' is named 0.96.0RC0, copy it to the svn checkout of the dist dev dir
+# in this case named hbase.dist.dev.svn
+$ cd /Users/stack/checkouts/hbase.dist.dev.svn
+$ svn info
+Path: .
+Working Copy Root Path: /Users/stack/checkouts/hbase.dist.dev.svn
+URL: https://dist.apache.org/repos/dist/dev/hbase
+Repository Root: https://dist.apache.org/repos/dist
+Repository UUID: 0d268c88-bc11-4956-87df-91683dc98e59
+Revision: 15087
+Node Kind: directory
+Schedule: normal
+Last Changed Author: ndimiduk
+Last Changed Rev: 15045
+Last Changed Date: 2016-08-28 11:13:36 -0700 (Sun, 28 Aug 2016)
+$ mv 0.96.0RC0 /Users/stack/checkouts/hbase.dist.dev.svn
+$ svn add 0.96.0RC0
+$ svn commit ...
+```
+
+Ensure it actually gets published by checking [https://dist.apache.org/repos/dist/dev/hbase/](https://dist.apache.org/repos/dist/dev/hbase/).
+
+Announce the release candidate on the mailing list and call a vote.
+
+
+
+
+### Publishing a SNAPSHOT to maven
+
+Make sure your _settings.xml_ is set up properly (see [Example ~/.m2/settings.xml File](/docs/building-and-developing/releasing#example-m2settingsxml-file)).
+Make sure the hbase version includes `-SNAPSHOT` as a suffix.
+Following is an example of publishing SNAPSHOTS of a release that had an hbase version of 0.96.0 in its poms.
+
+```bash
+$ mvn clean install -DskipTests javadoc:aggregate site assembly:single -Prelease
+$ mvn -DskipTests deploy -Papache-release
+```
+
+The _make_rc.sh_ script mentioned above (see [Making a Release Candidate](/docs/building-and-developing/releasing#making-a-release-candidate)) can help you publish `SNAPSHOTS`.
+Make sure your `hbase.version` has a `-SNAPSHOT` suffix before running the script.
+It will put a snapshot up into the apache snapshot repository for you.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/repositories.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/repositories.mdx
new file mode 100644
index 000000000000..2530ae45968e
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/repositories.mdx
@@ -0,0 +1,13 @@
+---
+title: "Apache HBase Repositories"
+description: "List of Apache HBase Git repositories including main HBase, connectors, operator tools, website, and third-party libraries."
+---
+
+Apache HBase consists of multiple repositories which are hosted on [Apache GitBox](https://gitbox.apache.org/).
+These are the following:
+
+- [hbase](https://gitbox.apache.org/repos/asf?p=hbase.git) - main Apache HBase repository
+- [hbase-connectors](https://gitbox.apache.org/repos/asf?p=hbase-connectors.git) - connectors to Apache Kafka and Apache Spark
+- [hbase-operator-tools](https://gitbox.apache.org/repos/asf?p=hbase-operator-tools.git) - operability and supportability tools, such as [HBase HBCK2](/docs/operational-management/tools#hbase-hbck2)
+- [hbase-site](https://gitbox.apache.org/repos/asf?p=hbase-site.git) - hbase.apache.org website
+- [hbase-thirdparty](https://gitbox.apache.org/repos/asf?p=hbase-thirdparty.git) - relocated versions of popular third-party libraries
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/tests.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/tests.mdx
new file mode 100644
index 000000000000..52b143bf6a53
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/tests.mdx
@@ -0,0 +1,810 @@
+---
+title: "Tests"
+description: "Writing and running HBase unit tests, integration tests, and ChaosMonkey destructive tests for comprehensive test coverage."
+---
+
+Developers, at a minimum, should familiarize themselves with the unit test detail; unit tests in HBase have a character not usually seen in other projects.
+
+This information is about unit tests for HBase itself.
+For developing unit tests for your HBase applications, see [Unit Testing HBase Applications](/docs/unit-testing).
+
+## Apache HBase Modules
+
+As of 0.96, Apache HBase is split into multiple modules.
+This creates "interesting" rules for how and where tests are written.
+If you are writing code for `hbase-server`, see [Unit Tests](/docs/building-and-developing/tests#building-and-developing-unit-tests) for how to write your tests.
+These tests can spin up a minicluster and will need to be categorized.
+For any other module, for example `hbase-common`, the tests must be strict unit tests and just test the class under test - no use of the HBaseTestingUtility or minicluster is allowed (or even possible given the dependency tree).
+
+Starting from 3.0.0, HBaseTestingUtility is renamed to HBaseTestingUtil and marked as IA.Private. Of course the API is still the same.
+
+### Testing the HBase Shell
+
+The HBase shell and its tests are predominantly written in jruby.
+
+In order to make these tests run as a part of the standard build, there are a few JUnit test classes that take care of loading the jruby implemented tests and running them.
+The tests were split into separate classes to accommodate class level timeouts (see [Unit Tests](/docs/building-and-developing/tests#building-and-developing-unit-tests) for specifics).
+You can run all of these tests from the top level with:
+
+```bash
+mvn clean test -Dtest=Test*Shell
+```
+
+If you have previously done a `mvn install`, then you can instruct maven to run only the tests in the hbase-shell module with:
+
+```bash
+mvn clean test -pl hbase-shell
+```
+
+Alternatively, you may limit the shell tests that run using the system variable `shell.test`.
+This value should specify the ruby literal equivalent of a particular test case by name.
+For example, the tests that cover the shell commands for altering tables are contained in the test case `AdminAlterTableTest` and you can run them with:
+
+```bash
+mvn clean test -pl hbase-shell -Dshell.test=/AdminAlterTableTest/
+```
+
+You may also use a [Ruby Regular Expression
+literal](https://docs.ruby-lang.org/en/master/syntax/literals_rdoc.html#label-Regexp+Literals) (in the `/pattern/` style) to select a set of test cases.
+You can run all of the HBase admin related tests, including both the normal administration and the security administration, with the command:
+
+```bash
+mvn clean test -pl hbase-shell -Dshell.test=/.*Admin.*Test/
+```
+
+In the event of a test failure, you can see details by examining the XML version of the surefire report results
+
+```bash
+vim hbase-shell/target/surefire-reports/TEST-org.apache.hadoop.hbase.client.TestShell.xml
+```
+
+### Running Tests in other Modules
+
+If the module you are developing in has no other dependencies on other HBase modules, then you can cd into that module and just run:
+
+```bash
+mvn test
+```
+
+which will just run the tests IN THAT MODULE.
+If there are other dependencies on other modules, then you will have to run the command from the ROOT HBASE DIRECTORY.
+This will run the tests in the other modules, unless you specify to skip the tests in that module.
+For instance, to skip the tests in the hbase-server module, you would run:
+
+```bash
+mvn clean test -PskipServerTests
+```
+
+from the top level directory to run all the tests in modules other than hbase-server.
+Note that you can specify to skip tests in multiple modules as well as just for a single module.
+For example, to skip the tests in `hbase-server` and `hbase-common`, you would run:
+
+```bash
+mvn clean test -PskipServerTests -PskipCommonTests
+```
+
+Also, keep in mind that if you are running tests in the `hbase-server` module you will need to apply the maven profiles discussed in [Running tests](/docs/building-and-developing/tests#running-tests) to get the tests to run properly.
+
+## Unit Tests [#building-and-developing-unit-tests]
+
+Apache HBase unit tests must carry a Category annotation and
+as of `hbase-2.0.0`, must be stamped with the HBase `ClassRule`.
+Here is an example of what a Test Class looks like with a
+Category and ClassRule included:
+
+```java
+...
+@Category(SmallTests.class)
+public class TestHRegionInfo {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestHRegionInfo.class);
+
+ @Test
+ public void testCreateHRegionInfoName() throws Exception {
+ // ...
+ }
+}
+```
+
+Here the Test Class is `TestHRegionInfo`. The `CLASS_RULE` has
+the same form in every test class only the `.class` you pass
+is that of the local test; i.e. in the TestTimeout Test Class, you'd
+pass `TestTimeout.class` to the `CLASS_RULE` instead of the
+`TestHRegionInfo.class` we have above. The `CLASS_RULE`
+is where we'll enforce timeouts (currently set at a hard limit of
+thirteen minutes for all tests, i.e. 780 seconds) and other cross-unit test facilities.
+The test is in the `SmallTests` Category.
+
+Categories can be arbitrary and provided as a list but each test MUST
+carry one from the following list of sizings: `small`, `medium`, `large`, and
+`integration`. The test sizing is designated using the JUnit
+[categories](https://github.com/junit-team/junit4/wiki/Categories): `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`.
+JUnit Categories are denoted using java annotations (a special unit test looks
+for the presence of the @Category annotation in all unit tests and will fail if it
+finds a test suite missing a sizing marking).
+
+The first three categories, `small`, `medium`, and `large`, are for test cases which run when you
+type `$ mvn test`.
+In other words, these three categorizations are for HBase unit tests.
+The `integration` category is not for unit tests, but for integration tests.
+These are normally run when you invoke `$ mvn verify`.
+Integration tests are described in [Integration Tests](/docs/building-and-developing/tests#building-and-developing-integration-tests).
+
+Keep reading to figure which annotation of the set `small`, `medium`, and `large`
+to put on your new HBase test case.
+
+### Categorizing Tests
+
+#### Small Tests:
+
+_Small_ test cases are executed in a separate JVM and each test suite/test class should
+run in 15 seconds or less; i.e. a [junit test fixture](https://en.wikipedia.org/wiki/JUnit), a java object made
+up of test methods, should finish in under 15 seconds, no matter how many or how few test methods
+it has. These test cases should not use a minicluster as a minicluster starts many services,
+most unrelated to what is being tested.
+
+#### Medium Tests:
+
+_Medium_ test cases are executed in a separate JVM, and each individual test suite or test class (in
+junit parlance, a [test fixture](https://en.wikipedia.org/wiki/JUnit)) should run in 50 seconds
+or less. These test cases can use a mini cluster. Since we start up a JVM per test fixture (and
+often a cluster too), be sure to make the startup pay by writing test fixtures that do a lot of
+testing, running for tens of seconds perhaps, combining tests rather than spinning up a jvm (and cluster)
+per test method; this practice will help with overall test times.
+
+#### Large Tests:
+
+_Large_ test cases are everything else. They are typically large-scale tests, regression tests
+for specific bugs, timeout tests, or performance tests. No large test suite can take longer than
+thirteen minutes. It will be killed as timed out. Cast your test as an Integration Test if it needs
+to run longer.
+
+#### Integration Tests: [#building-and-developing-unit-tests-categorizing-tests-integration-tests]
+
+_Integration_ tests are system level tests.
+See [Integration Tests](/docs/building-and-developing/tests#building-and-developing-integration-tests) for more info.
+If you invoke `$ mvn test` on integration tests, there is no timeout for the test.
+
+## Running tests
+
+The state of tests on the hbase branches varies. Some branches keep good test hygiene and all tests pass
+reliably with perhaps an unlucky sporadic flakey test failure. On other branches, the case may be less so with
+frequent flakies and even broken tests in need of attention that fail 100% of the time. Try to figure out
+the state of tests on the branch you are currently interested in; the current state of nightly
+[apache jenkins builds](https://builds.apache.org/view/H-L/view/HBase/job/HBase%20Nightly/) is a good
+place to start. Tests on master branch are generally not in the best of condition as releases
+are less frequent off master. This can make it hard landing patches especially given our dictum that
+patches land on master branch first.
+
+The full test suite can take from 5-6 hours on an anemic VM with 4 CPUs and minimal
+parallelism to 50 minutes or less on a linux machine with dozens of CPUs and plenty of
+RAM.
+
+When you go to run the full test suite, make sure you raise the test runner user's process
+limit (`ulimit -u`; make sure it is above 6000, or higher still if you run with more parallelism) and the
+open files limit (`ulimit -n`; make sure it is above 10240) on your system.
+Errors that occur because the test run hits these
+limits are often only opaquely related to the constraint. You can see the current
+user settings by running `ulimit -a`.
+
+### Default: small and medium category tests
+
+Running `mvn test` will execute all small tests in a single JVM (no fork) and then medium tests in a
+forked, separate JVM for each test instance (For definition of 'small' test and so on, see
+[Unit Tests](/docs/building-and-developing/tests#building-and-developing-unit-tests)). Medium tests are NOT executed if there is an error in a
+small test. Large tests are NOT executed.
+
+### Running all tests
+
+Running `mvn test -P runAllTests` will execute small tests in a single JVM, then medium and large tests
+in a forked, separate JVM for each test. Medium and large tests are NOT executed if there is an error in
+a small test.
+
+### Running a single test or all tests in a package
+
+To run an individual test, e.g. `MyTest`, run `mvn test -Dtest=MyTest`. You can also pass multiple,
+individual tests as a comma-delimited list:
+
+```bash
+mvn test -Dtest=MyTest1,MyTest2,MyTest3
+```
+
+You can also pass a package, which will run all tests under the package:
+
+```bash
+mvn test '-Dtest=org.apache.hadoop.hbase.client.*'
+```
+
+When `-Dtest` is specified, the `localTests` profile will be used.
+Each junit test is executed in a separate JVM (A fork per test class).
+There is no parallelization when tests are running in this mode.
+You will see a new message at the end of the report: `"[INFO] Tests are skipped"`.
+It's harmless. However, you need to make sure the sum of
+`Tests run:` in the `Results:` section of the test reports matches the number of tests
+you specified, because no error will be reported when a non-existent test case is specified.
+
+### Other test invocation permutations
+
+Running `mvn test -P runSmallTests` will execute "small" tests only, using a single JVM.
+
+Running `mvn test -P runMediumTests` will execute "medium" tests only, launching a new JVM for each test-class.
+
+Running `mvn test -P runLargeTests` will execute "large" tests only, launching a new JVM for each test-class.
+
+For convenience, you can run `mvn test -P runDevTests` to execute both small and medium tests, using a single JVM.
+
+### Running tests faster
+
+By default, `$ mvn test -P runAllTests` runs all tests using a quarter of the CPUs available on the machine
+hosting the test run (see `surefire.firstPartForkCount` and `surefire.secondPartForkCount` in the top-level
+hbase `pom.xml`, which default to 0.25C, or 1/4 of the CPU count). Up these counts to get the build to run faster.
+You can also have hbase modules
+run their tests in parallel when the dependency graph allows by passing `--threads=N` when you invoke
+maven, where `N` is the amount of _module_ parallelism wanted.
+
+For example, assuming you want to use all cores on a machine to run tests,
+you could start up the maven test run with:
+
+```bash
+$ x="1.0C"; mvn -Dsurefire.firstPartForkCount=$x -Dsurefire.secondPartForkCount=$x test -PrunAllTests
+```
+
+On a 32-core machine, you should see periods during which 32 forked JVMs appear in your process listing, each running unit tests.
+Your mileage may vary. Depending on hardware, overcommitment of CPU and/or memory can bring the test suite crashing down,
+usually complaining with a spew of test system exits and incomplete test report xml files. Start gently, with the default fork counts,
+and move up gradually.
+
+Adding `--threads=N`, maven will run N maven modules in parallel (when module inter-dependencies allow). Be aware that if you have
+set the fork count to `1.0C` and the `--threads` count to '2', the number of concurrent test runners can approach
+2 \* CPU, a count likely to overcommit the host machine (with attendant test exits and failures).
+
+You will need ~2.2GB of memory per forked JVM plus the memory used by maven itself (3-4G).
+
+#### RAM Disk
+
+To increase the speed, you can also use a RAM disk. 2-3G should be sufficient. Be sure to
+delete the files between each test run. The typical way to configure a RAM disk on Linux is:
+
+```bash
+$ sudo mkdir /ram2G
+sudo mount -t tmpfs -o size=2048M tmpfs /ram2G
+```
+
+You can then use it to run all HBase tests on 2.0 with the command:
+
+```bash
+mvn test -PrunAllTests -Dtest.build.data.basedirectory=/ram2G
+```
+
+### hbasetests.sh
+
+It's also possible to use the script `hbasetests.sh`.
+This script runs the medium and large tests in parallel with two maven instances, and provides a single report.
+This script does not use the hbase version of surefire so no parallelization is being done other than the two maven instances the script sets up.
+It must be executed from the directory which contains the _pom.xml_.
+
+For example running `./dev-support/hbasetests.sh` will execute small and medium tests.
+Running `./dev-support/hbasetests.sh runAllTests` will execute all tests.
+Running `./dev-support/hbasetests.sh replayFailed` will rerun the failed tests a second time, in a separate JVM and without parallelization.
+
+### Test Timeouts
+
+The HBase unit test sizing categorization timeouts are not strictly enforced.
+
+Any test that runs longer than ten minutes will be timed out/killed.
+
+As of hbase-2.0.0, we have purged all per-test-method timeouts: i.e.
+
+```java
+...
+ @Test(timeout=30000)
+ public void testCreateHRegionInfoName() throws Exception {
+ // ...
+ }
+```
+
+They are discouraged and don't make much sense given we are timing
+based on how long the whole Test Fixture/Class/Suite takes, and
+given that how long a test method takes varies wildly
+depending upon context (loaded Apache Infrastructure versus a
+developer machine with nothing else running on it).
+
+### Test Resource Checker
+
+A custom Maven SureFire plugin listener checks a number of resources before and after each HBase unit test runs and logs its findings at the end of the test output files which can be found in _target/surefire-reports_ per Maven module (Tests write test reports named for the test class into this directory.
+Check the _\*-out.txt_ files). The resources counted are the number of threads, the number of file descriptors, etc.
+If the number has increased, it adds a _LEAK?_ comment in the logs.
+As you can have an HBase instance running in the background, some threads can be deleted/created without any specific action in the test.
+However, if the test does not work as expected, or if the test should not impact these resources, it's worth checking these log lines `...hbase.ResourceChecker(157): before...` and `...hbase.ResourceChecker(157): after...`.
+For example:
+
+```text
+2012-09-26 09:22:15,315 INFO [pool-1-thread-1]
+hbase.ResourceChecker(157): after:
+regionserver.TestColumnSeeking#testReseeking Thread=65 (was 65),
+OpenFileDescriptor=107 (was 107), MaxFileDescriptor=10240 (was 10240),
+ConnectionCount=1 (was 1)
+```
+
+## Writing Tests
+
+### General rules
+
+- As much as possible, tests should be written as category small tests.
+- All tests must be written to support parallel execution on the same machine, hence they should not use shared resources such as fixed ports or fixed file names.
+- Tests should not overlog.
+  More than 100 lines/second makes the logs complex to read and uses I/O that is then not available to the other tests.
+- Tests can be written with `HBaseTestingUtility` (see the sketch after this list).
+  This class offers helper functions to create a temp directory and do the cleanup, or to start a cluster.
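+
+As a small illustration of the helpers mentioned in the last bullet (the `getDataTestDir` method name
+is assumed from `HBaseTestingUtility`'s common test base class; treat it as an assumption and check
+the current API):
+
+```java
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+
+public class TempDirSketch {
+  public static void main(String[] args) {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    // A per-test temporary directory under the utility's base test data dir;
+    // the utility's cleanup helpers take care of removing it.
+    Path tempDir = util.getDataTestDir("my-test");
+    System.out.println("Writing test data under " + tempDir);
+  }
+}
+```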
+
+### Categories and execution time
+
+- All tests must be categorized; if not, they could be skipped.
+- All tests should be written to be as fast as possible.
+- See [Unit Tests](/docs/building-and-developing/tests#building-and-developing-unit-tests) for test case categories and corresponding timeouts.
+  This should ensure good parallelization for people using it, and ease the analysis when a test fails.
+
+### Sleeps in tests
+
+Whenever possible, tests should not use `Thread.sleep`, but rather wait for the real event they need.
+This is faster and clearer for the reader.
+Tests should not do a `Thread.sleep` without testing an ending condition.
+This makes it clear what the test is waiting for.
+Moreover, the test will work whatever the machine performance is.
+Sleeps should be minimal to be as fast as possible.
+Waiting for a variable should be done in a 40ms sleep loop.
+Waiting for a socket operation should be done in a 200ms sleep loop.
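+
+As a rough sketch of the pattern (the `isServerOnline()` check below is a hypothetical stand-in for
+whatever event the test really needs; HBase's test code also ships a `Waiter` helper that wraps this
+kind of polling):
+
+```java
+public class WaitInsteadOfSleep {
+  // Poll an ending condition in a short sleep loop rather than doing one long, blind Thread.sleep.
+  static void waitForServerOnline(long timeoutMs) throws InterruptedException {
+    long deadline = System.currentTimeMillis() + timeoutMs;
+    while (!isServerOnline()) {
+      if (System.currentTimeMillis() > deadline) {
+        throw new AssertionError("Server did not come online within " + timeoutMs + " ms");
+      }
+      Thread.sleep(40); // ~40ms loop for a variable; use ~200ms when waiting on a socket operation
+    }
+  }
+
+  // Hypothetical condition; replace with the real event the test is waiting for.
+  static boolean isServerOnline() {
+    return true;
+  }
+}
+```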
+
+### Tests using a cluster
+
+Tests using an HRegion do not have to start a cluster: a region can use the local file system.
+Starting/stopping a cluster costs around 10 seconds.
+A cluster should not be started per test method but per test class.
+A started cluster must be shut down using `HBaseTestingUtility#shutdownMiniCluster`, which cleans the directories.
+As much as possible, tests should use the default settings for the cluster.
+When they don't, they should document it.
+This will allow sharing the cluster later.
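+
+A minimal sketch of the per-class cluster lifecycle described above (class and test names are
+placeholders; the annotations and `HBaseTestingUtility` calls are the usual JUnit 4 pattern used in
+HBase tests):
+
+```java
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestWithMiniCluster {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  // Start the mini cluster once per test class, not per test method.
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster();
+  }
+
+  // Always shut it down; this also cleans up the test directories.
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testSomethingAgainstTheCluster() throws Exception {
+    // Interact with the cluster via TEST_UTIL, e.g. TEST_UTIL.getConnection().
+  }
+}
+```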
+
+### Tests Skeleton Code
+
+Here is a test skeleton code with Categorization and a Category-based timeout rule to copy and paste and use as basis for test contribution.
+
+```java
+/**
+ * Describe what this testcase tests. Talk about resources initialized in @BeforeClass (before
+ * any test is run) and before each test is run, etc.
+ */
+// Specify the category as explained in Unit Tests section.
+@Category(SmallTests.class)
+public class TestExample {
+ // Replace the TestExample.class in the below with the name of your test fixture class.
+ private static final Log LOG = LogFactory.getLog(TestExample.class);
+
+ // Handy test rule that allows you subsequently get the name of the current method. See
+ // down in 'testExampleFoo()' where we use it to log current test's name.
+ @Rule public TestName testName = new TestName();
+
+ // The below rule does two things. It decides the timeout based on the category
+ // (small/medium/large) of the testcase. This @Rule requires that the full testcase runs
+ // within this timeout irrespective of individual test methods' times. The second
+ // feature is we'll dump in the log when the test is done a count of threads still
+ // running.
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+    withTimeout(this.getClass()).withLookingForStuckThread(true).build();
+
+ @Before
+ public void setUp() throws Exception {
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ @Test
+ public void testExampleFoo() {
+ LOG.info("Running test " + testName.getMethodName());
+ }
+}
+```
+
+## Integration Tests [#building-and-developing-integration-tests]
+
+HBase integration/system tests are tests that are beyond HBase unit tests.
+They are generally long-lasting, sizeable (the test can be asked to run 1M rows or 1B rows), and targetable (they can take configuration that will point them at the ready-made cluster they are to run against; integration tests do not include cluster start/stop code). When verifying success, integration tests rely on public APIs only; they do not attempt to examine server internals to assert success/failure.
+Integration tests are what you would run when you need more elaborate proofing of a release candidate beyond what unit tests can do.
+They are not generally run on the Apache Continuous Integration build server; however, some sites opt to run integration tests as a part of their continuous testing on an actual cluster.
+
+Integration tests currently live under the _src/test_ directory in the hbase-it submodule and will match the regex: _*IntegrationTest*.java_.
+All integration tests are also annotated with `@Category(IntegrationTests.class)`.
+
+Integration tests can be run in two modes: using a mini cluster, or against an actual distributed cluster.
+Maven failsafe is used to run the tests using the mini cluster.
+IntegrationTestsDriver class is used for executing the tests against a distributed cluster.
+Integration tests SHOULD NOT assume that they are running against a mini cluster, and SHOULD NOT use private API's to access cluster state.
+To interact with the distributed or mini cluster uniformly, `IntegrationTestingUtility`, and `HBaseCluster` classes, and public client API's can be used.
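+
+As a rough sketch of staying cluster-agnostic (the method names used here, such as
+`initializeCluster`, `getHBaseClusterInterface` and `restoreCluster`, are the ones these utilities
+are believed to expose; treat them as assumptions and check the current API before relying on them):
+
+```java
+import org.apache.hadoop.hbase.HBaseCluster;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
+
+public class ClusterAgnosticSketch {
+  public static void main(String[] args) throws Exception {
+    IntegrationTestingUtility util = new IntegrationTestingUtility();
+    // Reuses the distributed cluster pointed at by the configuration if one is set,
+    // otherwise spins up a mini cluster with the requested number of servers.
+    util.initializeCluster(3);
+    HBaseCluster cluster = util.getHBaseClusterInterface();
+    // Drive the cluster only through HBaseCluster and public client APIs here.
+    System.out.println("Got cluster handle: " + cluster);
+    // Shuts down the mini cluster; leaves an externally managed cluster alone.
+    util.restoreCluster();
+  }
+}
+```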
+
+On a distributed cluster, integration tests that use ChaosMonkey or otherwise manipulate services through the cluster manager (e.g.
+restart regionservers) use SSH to do it.
+To run these, the test process should be able to run commands on the remote end, so ssh should be configured accordingly (for example, if HBase runs under the hbase user in your cluster, you can set up passwordless ssh for that user and run the test also under it). To facilitate that, the `hbase.it.clustermanager.ssh.user`, `hbase.it.clustermanager.ssh.opts` and `hbase.it.clustermanager.ssh.cmd` configuration settings can be used.
+"User" is the remote user that the cluster manager should use to perform ssh commands.
+"Opts" contains additional options that are passed to SSH (for example, "-i /tmp/my-key"). Finally, if you have some custom environment setup, "cmd" is the override format for the entire tunnel (ssh) command.
+The default string is \{`/usr/bin/ssh %1$s %2$s%3$s%4$s "%5$s"`\} and is a good starting point.
+This is a standard Java format string with 5 arguments that is used to execute the remote command.
+Argument 1 (%1$s) is the SSH options set via the opts setting or via an environment variable, 2 is the SSH user name, 3 is "@" if a username is set or "" otherwise, 4 is the target host name, and 5 is the logical command to execute (it may include single quotes, so don't use them). For example, if you run the tests under a non-hbase user and want to ssh as that user and change to hbase on the remote machine, you can use:
+
+```bash
+/usr/bin/ssh %1$s %2$s%3$s%4$s "su hbase - -c \"%5$s\""
+```
+
+That way, to kill RS (for example) integration tests may run:
+
+```bash
+{/usr/bin/ssh some-hostname "su hbase - -c \"ps aux | ... | kill ...\""}
+```
+
+The command is logged in the test logs, so you can verify it is correct for your environment.
+
+To disable the running of Integration Tests, pass the following profile on the command line `-PskipIntegrationTests`.
+For example,
+
+```bash
+$ mvn clean install test -Dtest=TestZooKeeper -PskipIntegrationTests
+```
+
+### Running integration tests against mini cluster
+
+HBase 0.92 added a `verify` maven target.
+Invoking it, for example by doing `mvn verify`, will run all the phases up to and including the verify phase via the maven [failsafe
+plugin](https://maven.apache.org/plugins/maven-failsafe-plugin/), running all the above mentioned HBase unit tests as well as tests that are in the HBase integration test group.
+After you have completed `mvn install -DskipTests`, you can run just the integration tests by invoking:
+
+```bash
+cd hbase-it
+mvn verify
+```
+
+If you just want to run the integration tests at the top level, you need to run two commands.
+First:
+
+```bash
+mvn failsafe:integration-test
+```
+
+This actually runs ALL the integration tests.
+
+> **Note:** This command will always output `BUILD SUCCESS` even if there are test failures.
+
+At this point, you could grep the output by hand looking for failed tests.
+However, maven will do this for us; just use:
+
+```bash
+mvn failsafe:verify
+```
+
+The above command basically looks at all the test results (so don't remove the 'target' directory) for test failures and reports the results.
+
+#### Running a subset of Integration tests
+
+This is very similar to how you specify running a subset of unit tests (see above), but use the property `it.test` instead of `test`.
+To just run `IntegrationTestClassXYZ.java`, use:
+
+```bash
+mvn failsafe:integration-test -Dit.test=IntegrationTestClassXYZ -DfailIfNoTests=false
+```
+
+The next thing you might want to do is run groups of integration tests, say all integration tests that are named IntegrationTestClassX\*.java:
+
+```bash
+mvn failsafe:integration-test -Dit.test=*ClassX* -DfailIfNoTests=false
+```
+
+This runs everything that is an integration test that matches **ClassX**. This means anything matching: "**\*/IntegrationTest\*ClassX**". You can also run multiple groups of integration tests using comma-delimited lists (similar to unit tests). Using a list of matches still supports full regex matching for each of the groups. This would look something like:
+
+```bash
+mvn failsafe:integration-test -Dit.test=*ClassX*,*ClassY -DfailIfNoTests=false
+```
+
+### Running integration tests against distributed cluster
+
+If you have an already-set-up HBase cluster, you can launch the integration tests by invoking the class `IntegrationTestsDriver`.
+You may have to run test-compile first.
+The configuration will be picked up by the bin/hbase script.
+
+```bash
+mvn test-compile
+```
+
+Then launch the tests with:
+
+```bash
+bin/hbase [--config config_dir] org.apache.hadoop.hbase.IntegrationTestsDriver
+```
+
+Pass `-h` to get usage on this sweet tool.
+Running the IntegrationTestsDriver without any argument will launch tests found under `hbase-it/src/test`, having `@Category(IntegrationTests.class)` annotation, and a name starting with `IntegrationTests`.
+See the usage, by passing -h, to see how to filter test classes.
+You can pass a regex which is checked against the full class name; so, part of class name can be used.
+IntegrationTestsDriver uses JUnit to run the tests.
+Currently there is no support for running integration tests against a distributed cluster using maven (see [HBASE-6201](https://issues.apache.org/jira/browse/HBASE-6201)).
+
+The tests interact with the distributed cluster by using the methods in the `DistributedHBaseCluster` (implementing `HBaseCluster`) class, which in turn uses a pluggable `ClusterManager`.
+Concrete implementations provide actual functionality for carrying out deployment-specific and environment-dependent tasks (SSH, etc). The default `ClusterManager` is `HBaseClusterManager`, which uses SSH to remotely execute start/stop/kill/signal commands, and assumes some posix commands (ps, etc). It also assumes that the user running the test has enough "power" to start/stop servers on the remote machines.
+By default, it picks up `HBASE_SSH_OPTS`, `HBASE_HOME`, `HBASE_CONF_DIR` from the env, and uses `bin/hbase-daemon.sh` to carry out the actions.
+Currently tarball deployments, deployments which use _hbase-daemons.sh_, and [Apache Ambari](https://incubator.apache.org/ambari/) deployments are supported.
+_/etc/init.d/_ scripts are not supported for now, but support can easily be added.
+For other deployment options, a ClusterManager can be implemented and plugged in.
+
+Some integration tests define a _main_ method as an entry point, and can be run on their own, rather than using the test driver. For example, the _itbll_ test can be run as follows:
+
+```bash
+bin/hbase org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList loop 2 1 100000 /temp 1 1000 50 1 0
+```
+
+> **Note:** The _hbase_ script assumes all integration tests with exposed _main_ methods to be run against a
+> distributed cluster will follow the **IntegrationTest** regex naming pattern mentioned above, in
+> order to properly set test dependencies into the classpath.
+
+### Destructive integration / system tests (ChaosMonkey)
+
+HBase 0.96 introduced a tool named `ChaosMonkey`, modeled after
+[the same-named tool by Netflix](https://netflix.github.io/chaosmonkey/).
+ChaosMonkey simulates real-world
+faults in a running cluster by killing or disconnecting random servers, or injecting
+other failures into the environment. You can use ChaosMonkey as a stand-alone tool
+to run a policy while other tests are running. In some environments, ChaosMonkey is
+always running, in order to constantly check that high availability and fault tolerance
+are working as expected.
+
+ChaosMonkey defines **Actions** and **Policies**.
+
+#### Actions:
+
+Actions are predefined sequences of events, such as the following:
+
+- Restart active master (sleep 5 sec)
+- Restart random regionserver (sleep 5 sec)
+- Restart random regionserver (sleep 60 sec)
+- Restart META regionserver (sleep 5 sec)
+- Restart ROOT regionserver (sleep 5 sec)
+- Batch restart of 50% of regionservers (sleep 5 sec)
+- Rolling restart of 100% of regionservers (sleep 5 sec)
+
+#### Policies:
+
+A policy is a strategy for executing one or more actions. The default policy
+executes a random action every minute based on predefined action weights.
+A given policy will be executed until ChaosMonkey is interrupted.
+
+Most ChaosMonkey actions are configured to have reasonable defaults, so you can run
+ChaosMonkey against an existing cluster without any additional configuration. The
+following example runs ChaosMonkey with the default configuration:
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner
+
+12/11/19 23:21:57 INFO util.ChaosMonkey: Using ChaosMonkey Policy: class org.apache.hadoop.hbase.util.ChaosMonkey$PeriodicRandomActionPolicy, period:60000
+12/11/19 23:21:57 INFO util.ChaosMonkey: Sleeping for 26953 to add jitter
+12/11/19 23:22:24 INFO util.ChaosMonkey: Performing action: Restart active master
+12/11/19 23:22:24 INFO util.ChaosMonkey: Killing master:master.example.com,60000,1353367210440
+12/11/19 23:22:24 INFO hbase.HBaseCluster: Aborting Master: master.example.com,60000,1353367210440
+12/11/19 23:22:24 INFO hbase.ClusterManager: Executing remote command: ps aux | grep master | grep -v grep | tr -s ' ' | cut -d ' ' -f2 | xargs kill -s SIGKILL , hostname:master.example.com
+12/11/19 23:22:25 INFO hbase.ClusterManager: Executed remote command, exit code:0 , output:
+12/11/19 23:22:25 INFO hbase.HBaseCluster: Waiting service:master to stop: master.example.com,60000,1353367210440
+12/11/19 23:22:25 INFO hbase.ClusterManager: Executing remote command: ps aux | grep master | grep -v grep | tr -s ' ' | cut -d ' ' -f2 , hostname:master.example.com
+12/11/19 23:22:25 INFO hbase.ClusterManager: Executed remote command, exit code:0 , output:
+12/11/19 23:22:25 INFO util.ChaosMonkey: Killed master server:master.example.com,60000,1353367210440
+12/11/19 23:22:25 INFO util.ChaosMonkey: Sleeping for:5000
+12/11/19 23:22:30 INFO util.ChaosMonkey: Starting master:master.example.com
+12/11/19 23:22:30 INFO hbase.HBaseCluster: Starting Master on: master.example.com
+12/11/19 23:22:30 INFO hbase.ClusterManager: Executing remote command: /homes/enis/code/hbase-0.94/bin/../bin/hbase-daemon.sh --config /homes/enis/code/hbase-0.94/bin/../conf start master , hostname:master.example.com
+12/11/19 23:22:31 INFO hbase.ClusterManager: Executed remote command, exit code:0 , output:starting master, logging to /homes/enis/code/hbase-0.94/bin/../logs/hbase-enis-master-master.example.com.out
+....
+12/11/19 23:22:33 INFO util.ChaosMonkey: Started master: master.example.com,60000,1353367210440
+12/11/19 23:22:33 INFO util.ChaosMonkey: Sleeping for:51321
+12/11/19 23:23:24 INFO util.ChaosMonkey: Performing action: Restart random region server
+12/11/19 23:23:24 INFO util.ChaosMonkey: Killing region server:rs3.example.com,60020,1353367027826
+12/11/19 23:23:24 INFO hbase.HBaseCluster: Aborting RS: rs3.example.com,60020,1353367027826
+12/11/19 23:23:24 INFO hbase.ClusterManager: Executing remote command: ps aux | grep regionserver | grep -v grep | tr -s ' ' | cut -d ' ' -f2 | xargs kill -s SIGKILL , hostname:rs3.example.com
+12/11/19 23:23:25 INFO hbase.ClusterManager: Executed remote command, exit code:0 , output:
+12/11/19 23:23:25 INFO hbase.HBaseCluster: Waiting service:regionserver to stop: rs3.example.com,60020,1353367027826
+12/11/19 23:23:25 INFO hbase.ClusterManager: Executing remote command: ps aux | grep regionserver | grep -v grep | tr -s ' ' | cut -d ' ' -f2 , hostname:rs3.example.com
+12/11/19 23:23:25 INFO hbase.ClusterManager: Executed remote command, exit code:0 , output:
+12/11/19 23:23:25 INFO util.ChaosMonkey: Killed region server:rs3.example.com,60020,1353367027826. Reported num of rs:6
+12/11/19 23:23:25 INFO util.ChaosMonkey: Sleeping for:60000
+12/11/19 23:24:25 INFO util.ChaosMonkey: Starting region server:rs3.example.com
+12/11/19 23:24:25 INFO hbase.HBaseCluster: Starting RS on: rs3.example.com
+12/11/19 23:24:25 INFO hbase.ClusterManager: Executing remote command: /homes/enis/code/hbase-0.94/bin/../bin/hbase-daemon.sh --config /homes/enis/code/hbase-0.94/bin/../conf start regionserver , hostname:rs3.example.com
+12/11/19 23:24:26 INFO hbase.ClusterManager: Executed remote command, exit code:0 , output:starting regionserver, logging to /homes/enis/code/hbase-0.94/bin/../logs/hbase-enis-regionserver-rs3.example.com.out
+
+12/11/19 23:24:27 INFO util.ChaosMonkey: Started region server:rs3.example.com,60020,1353367027826. Reported num of rs:6
+```
+
+The output indicates that ChaosMonkey started the default `PeriodicRandomActionPolicy`
+policy, which is configured with all the available actions. It chose to run `RestartActiveMaster` and `RestartRandomRs` actions.
+
+### ChaosMonkey without SSH
+
+Chaos monkey can be run without SSH using the Chaos service and ZNode cluster manager. HBase ships
+with many cluster managers, available in the `hbase-it/src/test/java/org/apache/hadoop/hbase/` directory.
+
+Set the following property in hbase configuration to switch to `ZNodeClusterManager`:
+
+```xml
+<property>
+  <name>hbase.it.clustermanager.class</name>
+  <value>org.apache.hadoop.hbase.ZNodeClusterManager</value>
+</property>
+```
+
+Start the chaos agent on all hosts where you want to test chaos scenarios.
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.chaos.ChaosService -c start
+```
+
+Start the chaos monkey runner from any one host, preferably an edge node.
+An example log from running chaos monkey with the default policy `PeriodicRandomActionPolicy` is shown below:
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner
+
+INFO [main] hbase.HBaseCommonTestingUtility: Instantiating org.apache.hadoop.hbase.ZNodeClusterManager
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe] zookeeper.ZooKeeper: Initiating client connection, connectString=host1.example.com:2181,host2.example.com:2181,host3.example.com:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$19/2106254492@1a39cf8
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe] zookeeper.ClientCnxnSocket: jute.maxbuffer value is 4194304 Bytes
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe] zookeeper.ClientCnxn: zookeeper.request.timeout value is 0. feature enabled=
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe-SendThread(host2.example.com:2181)] zookeeper.ClientCnxn: Opening socket connection to server host2.example.com/10.20.30.40:2181. Will not attempt to authenticate using SASL (unknown error)
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe-SendThread(host2.example.com:2181)] zookeeper.ClientCnxn: Socket connection established, initiating session, client: /10.20.30.40:35164, server: host2.example.com/10.20.30.40:2181
+INFO [ReadOnlyZKClient-host1.example.com:2181,host2.example.com:2181,host3.example.com:2181@0x003d43fe-SendThread(host2.example.com:2181)] zookeeper.ClientCnxn: Session establishment complete on server host2.example.com/10.20.30.40:2181, sessionid = 0x101de9204670877, negotiated timeout = 60000
+INFO [main] policies.Policy: Using ChaosMonkey Policy class org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy, period=60000 ms
+ [ChaosMonkey-2] policies.Policy: Sleeping for 93741 ms to add jitter
+INFO [ChaosMonkey-0] policies.Policy: Sleeping for 9752 ms to add jitter
+INFO [ChaosMonkey-1] policies.Policy: Sleeping for 65562 ms to add jitter
+INFO [ChaosMonkey-3] policies.Policy: Sleeping for 38777 ms to add jitter
+INFO [ChaosMonkey-0] actions.CompactRandomRegionOfTableAction: Performing action: Compact random region of table usertable, major=false
+INFO [ChaosMonkey-0] policies.Policy: Sleeping for 59532 ms
+INFO [ChaosMonkey-3] client.ConnectionImplementation: Getting master connection state from TTL Cache
+INFO [ChaosMonkey-3] client.ConnectionImplementation: Getting master state using rpc call
+INFO [ChaosMonkey-3] actions.DumpClusterStatusAction: Cluster status
+Master: host1.example.com,16000,1678339058222
+Number of backup masters: 0
+Number of live region servers: 3
+ host1.example.com,16020,1678794551244
+ host2.example.com,16020,1678341258970
+ host3.example.com,16020,1678347834336
+Number of dead region servers: 0
+Number of unknown region servers: 0
+Average load: 123.6666666666666
+Number of requests: 118645157
+Number of regions: 2654
+Number of regions in transition: 0
+INFO [ChaosMonkey-3] policies.Policy: Sleeping for 89614 ms
+```
+
+For more customisation options, see the help for `ChaosMonkeyRunner`. For example, you can pass the table name on which the chaos operations are to be performed.
+Below is the output of the help command, listing all the supported options.
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner --help
+
+usage: hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner
+Options:
+ -c Name of extra configurations file to find on CLASSPATH
+ -m,--monkey Which chaos monkey to run
+ -monkeyProps The properties file for specifying chaos monkey properties.
+ -tableName Table name in the test to run chaos monkey against
+ -familyName Family name in the test to run chaos monkey against
+```
+
+For example, running the following will start `ServerKillingMonkeyFactory`, which chooses among actions such as a rolling batch restart of RegionServers, a graceful rolling restart of RegionServers one at a time, a restart of the active master, a forced balancer run, etc.
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner -m org.apache.hadoop.hbase.chaos.factories.ServerKillingMonkeyFactory
+```
+
+### Available Policies
+
+HBase ships with several ChaosMonkey policies, available in the
+`hbase/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/` directory.
+
+### Configuring Individual ChaosMonkey Actions
+
+ChaosMonkey integration tests can be configured per test run.
+Create a Java properties file in the HBase CLASSPATH and pass it to ChaosMonkey using
+the `-monkeyProps` configuration flag. Configurable properties, along with their default
+values if applicable, are listed in the `org.apache.hadoop.hbase.chaos.factories.MonkeyConstants`
+class. For properties that have defaults, you can override them by including them
+in your properties file.
+
+The following example uses a properties file called `monkey.properties`.
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.IntegrationTestIngest -m slowDeterministic -monkeyProps monkey.properties
+```
+
+The above command will start the integration tests and chaos monkey. It will look for the
+properties file _monkey.properties_ on the HBase CLASSPATH; e.g. inside the HBASE _conf_ dir.
+
+Here is an example chaos monkey file:
+
+#### Example ChaosMonkey Properties File
+
+```properties
+sdm.action1.period=120000
+sdm.action2.period=40000
+move.regions.sleep.time=80000
+move.regions.max.time=1000000
+move.regions.sleep.time=80000
+batch.restart.rs.ratio=0.4f
+```
+
+Periods/time are expressed in milliseconds.
+
+HBase 1.0.2 and newer add the ability to restart HBase's underlying ZooKeeper quorum or
+HDFS nodes. To use these actions, you need to configure some new properties, which
+have no reasonable defaults because they are deployment-specific, in your ChaosMonkey
+properties file, which may be `hbase-site.xml` or a different properties file.
+
+```xml
+<property>
+  <name>hbase.it.clustermanager.hadoop.home</name>
+  <value>$HADOOP_HOME</value>
+</property>
+<property>
+  <name>hbase.it.clustermanager.zookeeper.home</name>
+  <value>$ZOOKEEPER_HOME</value>
+</property>
+<property>
+  <name>hbase.it.clustermanager.hbase.user</name>
+  <value>hbase</value>
+</property>
+<property>
+  <name>hbase.it.clustermanager.hadoop.hdfs.user</name>
+  <value>hdfs</value>
+</property>
+<property>
+  <name>hbase.it.clustermanager.zookeeper.user</name>
+  <value>zookeeper</value>
+</property>
+```
+
+### Customizing Destructive ChaosMonkey Actions
+
+The section above shows how to set up custom configurations for the _slowDeterministic_ monkey
+policy. This is a policy that pre-defines a set of destructive actions of varying gravity for a
+running cluster. These actions are grouped into three categories: _light weight_, _mid weight_ and
+_heavy weight_. Although it's possible to define some properties for the different actions
+(such as timeouts, frequency, etc), the actions themselves are not configurable.
+
+For certain deployments, it may be interesting to define your own test strategy, either less or more
+aggressive than the pre-defined set of actions provided by _slowDeterministic_. For such cases,
+the _configurableSlowDeterministic_ policy can be used. It allows a customizable set of
+_heavy weight_ actions to be defined in the _monkey.properties_ properties file:
+
+```properties
+batch.restart.rs.ratio=0.3f
+heavy.actions=RestartRandomRsAction(500000);MoveRandomRegionOfTableAction(360000,$table_name);SplitAllRegionOfTableAction($table_name)
+```
+
+The above properties file definition instructs chaos monkey to perform a RegionServer crash every 8 minutes,
+a random region move every 6 minutes, and at least one split of all table regions.
+
+To run this policy, just specify _configurableSlowDeterministic_ as the monkey policy to run, together
+with a property file containing the _heavy.actions_ property definition:
+
+```bash
+$ bin/hbase org.apache.hadoop.hbase.IntegrationTestIngest -m configurableSlowDeterministic -monkeyProps monkey.properties
+```
+
+When specifying monkey actions, make sure to define all required constructor parameters. For actions
+that require a table name parameter, the _$table_name_ placeholder can be specified, and it will
+automatically resort to the table created by the integration test run.
+
+If _heavy.actions_ property is omitted in the properties file, _configurableSlowDeterministic_ will
+just run as the _slowDeterministic_ policy
+(it will execute all the heavy weight actions defined by _slowDeterministic_ policy).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/updating-landing.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/updating-landing.mdx
new file mode 100644
index 000000000000..1bf85be204a3
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/updating-landing.mdx
@@ -0,0 +1,12 @@
+---
+title: "Updating hbase.apache.org"
+description: "Contributing to and publishing the HBase website and documentation using gitpubsub mechanism."
+---
+
+## Contributing to hbase.apache.org
+
+See [appendix contributing to documentation](/docs/contributing-to-documentation) for more information on contributing to the documentation or website.
+
+## Publishing hbase.apache.org
+
+See [Publishing the HBase Website and Documentation](/docs/contributing-to-documentation#publishing-the-hbase-website-and-documentation) for instructions on publishing the website and documentation.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/voting.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/voting.mdx
new file mode 100644
index 000000000000..a63f3fff696c
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/building-and-developing/voting.mdx
@@ -0,0 +1,64 @@
+---
+title: "Voting on Release Candidates"
+description: "Baseline and additional verification procedures for voting on HBase release candidates including checksums, signatures, and testing."
+---
+
+Everyone is encouraged to try and vote on HBase release candidates.
+Only the votes of PMC members are binding.
+PMC members, please read this WIP doc on policy voting for a release candidate, [Release Policy](https://github.com/rectang/asfrelease/blob/master/release.md).
+
+> Before casting +1 binding votes, individuals are required to
+> download the signed source code package onto their own hardware, compile it as
+> provided, and test the resulting executable on their own platform, along with also
+> validating cryptographic signatures and verifying that the package meets the
+> requirements of the ASF policy on releases.
+
+Regarding the latter, run `mvn apache-rat:check` to verify all files are suitably licensed.
+See [HBase, mail # dev - On recent discussion clarifying ASF release policy](https://mail-archives.apache.org/mod_mbox/hbase-dev/201406.mbox/%3CCA%2BRK%3D_B8EP0JMFV%2Bdt-k1g%3DBmedzyq2z1GSqrnMMiH6%3DcdoiAA%40mail.gmail.com%3E)
+for how we arrived at this process.
+
+To help with the release verification, please follow the guideline below and vote based on your verification.
+
+## Baseline Verifications for Voting Release Candidates
+
+Although contributors have their own checklist for verifications, the following items are usually used for voting on release candidates.
+
+- CHANGES.md if any
+- RELEASENOTES.md (release notes) if any
+- Generated API compatibility report
+  - For what should be compatible, please refer to the [versioning guideline](/docs/upgrading/version-number), especially for items marked as high severity
+- Use `hbase-vote.sh` to perform sanity checks on checksums, signatures, file licensing, building from source, and unit tests.
+  - The `hbase-vote.sh` shell script is available under the `dev-support` directory of the HBase source. Following are the usage details.
+
+ ```bash
+ ./dev-support/hbase-vote.sh -h
+ hbase-vote. A script for standard vote which verifies the following items
+ 1. Checksum of sources and binaries
+ 2. Signature of sources and binaries
+ 3. Rat check
+ 4. Built from source
+ 5. Unit tests
+
+ Usage: hbase-vote.sh -s | --source [-k | --key ] [-f | --keys-file-url ] [-o | --output-dir ] [-P runSmallTests] [-D property[=value]]
+ hbase-vote.sh -h | --help
+
+ -h | --help Show this screen.
+ -s | --source '' A URL pointing to the release candidate sources and binaries
+ e.g. https://dist.apache.org/repos/dist/dev/hbase/hbase-RC0/
+ -k | --key '' A signature of the public key, e.g. 9AD2AE49
+ -f | --keys-file-url '' the URL of the key file, default is
+ https://downloads.apache.org/hbase/KEYS
+ -o | --output-dir '' directory which has the stdout and stderr of each verification target
+ -P | list of maven profiles to activate for test UT/IT, i.e. <-P runSmallTests> Defaults to runAllTests
+ -D | list of maven properties to set for the mvn invocations, i.e. <-D hadoop.profile=3.0> Defaults to unset
+ ```
+
+- If you see any unit test failures, please call out the individual test result and whether it's part of the flaky (nightly) tests dashboard, e.g. the [dashboard of the master branch](https://builds.apache.org/view/H-L/view/HBase/job/HBase-Find-Flaky-Tests/job/master/lastSuccessfulBuild/artifact/dashboard.html) (please change the test branch accordingly).
+
+## Additional Verifications for Voting Release Candidates
+
+Other than the common verifications, contributors may call out additional concerns, e.g. verifying a specific feature by running end-to-end tests on a distributed environment. This is optional and always encouraged.
+
+- Start a distributed HBase cluster and call out the test result of specific workload on cluster. e.g.
+ - Run basic table operations, e.g. `create/put/get/scan/flush/list/disable/drop`
+ - Run built-in tests, e.g. `LoadTestTool` (LTT) and `IntegrationTestBigLinkedList` (ITBLL)
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/bulk-data-generator-tool.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/bulk-data-generator-tool.mdx
new file mode 100644
index 000000000000..dddd51765a2a
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/bulk-data-generator-tool.mdx
@@ -0,0 +1,105 @@
+---
+title: "Bulk Data Generator Tool"
+description: "A random data generator tool for HBase tables leveraging HBase bulk load. It can create a pre-split HBase table, and the generated data is uniformly distributed to all the regions of the table."
+---
+
+## Usage [#bulk-data-generator-tool-usage]
+
+```bash
+usage: hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool [-D]*
+ -d,--delete-if-exist If it's set, the table will be deleted if already exist.
+ -h,--help Show help message for the tool
+ -mc,--mapper-count The number of mapper containers to be launched.
+ -o,--table-options Table options to be set while creating the table.
+ -r,--rows-per-mapper The number of rows to be generated PER mapper.
+ -sc,--split-count The number of regions/pre-splits to be created for the table.
+ -t,--table The table name for which data need to be generated.
+```
+
+Examples:
+
+```bash
+hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10
+
+hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10 -d -o "BACKUP=false,NORMALIZATION_ENABLED=false"
+
+hbase org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorTool -t TEST_TABLE -mc 10 -r 100 -sc 10 -Dmapreduce.map.memory.mb=8192
+```
+
+## Overview [#bulk-data-generator-tool-overview]
+
+### Table Schema
+
+The tool generates an HBase table with a single column family, **cf**, and 9 columns:
+
+```
+ORG_ID, TOOL_EVENT_ID, EVENT_ID, VEHICLE_ID, SPEED, LATITUDE, LONGITUDE, LOCATION, TIMESTAMP
+```
+
+with row key as
+
+```
+<TOOL_EVENT_ID>:<EVENT_ID>
+```
+
+### Table Creation
+
+The tool creates a pre-split HBase table having "**split-count**" splits (i.e. **split-count** + 1 regions) with sequential 6-digit region boundary prefixes.
+Example: If a table is generated with "**split-count**" as 10, it will have (10+1) regions with the following start-end keys.
+
+```
+(-000001, 000001-000002, 000002-000003, ...., 000009-000010, 000010-)
+```
+
+### Data Generation
+
+The tool creates and runs an MR job to generate the HFiles, which are bulk loaded to table regions via `org.apache.hadoop.hbase.tool.BulkLoadHFilesTool`.
+The number of mappers is defined in the input as "**mapper-count**". Each mapper generates "**records-per-mapper**" rows.
+
+`org.apache.hadoop.hbase.util.bulkdatagenerator.BulkDataGeneratorRecordReader` ensures that each record generated by a mapper is associated with an index (added to the key) ranging from 1 to "**records-per-mapper**".
+
+The TOOL_EVENT_ID column for each row has a 6 digit prefix as
+
+```
+(index) mod ("split-count" + 1)
+```
+
+For example, if 10 records are to be generated by each mapper and "**split-count**" is 4, the TOOL_EVENT_IDs for each record will have a prefix as follows:
+
+| Record Index | TOOL_EVENT_ID's first six characters |
+| ------------ | ------------------------------------ |
+| 1 | 000001 |
+| 2 | 000002 |
+| 3 | 000003 |
+| 4 | 000004 |
+| 5 | 000000 |
+| 6 | 000001 |
+| 7 | 000002 |
+| 8 | 000003 |
+| 9 | 000004 |
+| 10 | 000005 |
+
+Since TOOL_EVENT_ID is the first attribute of the row key and the table region boundaries also have 6-digit sequential prefixes as start-end keys, this ensures that each mapper generates (nearly) the same number of rows for each region, making the data uniformly distributed.
+The TOOL_EVENT_ID suffix and the other columns of the row are populated with random data.
+
+Number of rows generated is
+
+```
+rows-per-mapper * mapper-count
+```
+
+The size of each row is (approximately)
+
+```
+850 B
+```
+
+### Experiments
+
+These results are from an 11-node cluster running HBase and Hadoop services within a self-managed test environment.
+
+| Data Size | Time to Generate Data |
+| --------- | ---------------------------- |
+| 100 GB | 6 minutes |
+| 340 GB | 13 minutes |
+| 3.5 TB | 3 hours 10 minutes |
diff --git a/src/main/asciidoc/_chapters/case_studies.adoc b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/case-studies.mdx
similarity index 75%
rename from src/main/asciidoc/_chapters/case_studies.adoc
rename to hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/case-studies.mdx
index 96b6f2e07d51..980632131292 100644
--- a/src/main/asciidoc/_chapters/case_studies.adoc
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/case-studies.mdx
@@ -1,85 +1,56 @@
-////
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-////
-
-[[casestudies]]
-= Apache HBase Case Studies
-:doctype: book
-:numbered:
-:toc: left
-:icons: font
-:experimental:
-
-[[casestudies.overview]]
-== Overview
+---
+title: "Apache HBase Case Studies"
+description: "Performance and troubleshooting case studies for diagnosing Apache HBase cluster issues."
+---
+
+## Overview [#case-studies-overview]
This chapter will describe a variety of performance and troubleshooting case studies that can provide a useful blueprint on diagnosing Apache HBase cluster issues.
-For more information on Performance and Troubleshooting, see <> and <>.
+For more information on Performance and Troubleshooting, see [Apache HBase Performance Tuning](/docs/performance) and [Troubleshooting and Debugging Apache HBase](/docs/troubleshooting).
-[[casestudies.schema]]
-== Schema Design
+## Schema Design [#case-studies-schema-design]
-See the schema design case studies here: <>
+See the schema design case studies here: [Schema Design Case Studies](/docs/regionserver-sizing#schema-design-case-studies)
-[[casestudies.perftroub]]
-== Performance/Troubleshooting
+## Performance/Troubleshooting
-[[casestudies.slownode]]
-=== Case Study #1 (Performance Issue On A Single Node)
+### Case Study \#1 (Performance Issue On A Single Node)
-==== Scenario
+#### Scenario
Following a scheduled reboot, one data node began exhibiting unusual behavior.
Routine MapReduce jobs run against HBase tables which regularly completed in five or six minutes began taking 30 or 40 minutes to finish.
These jobs were consistently found to be waiting on map and reduce tasks assigned to the troubled data node (e.g., the slow map tasks all had the same Input Split). The situation came to a head during a distributed copy, when the copy was severely prolonged by the lagging node.
-==== Hardware
+#### Hardware
+
+Datanodes:
-.Datanodes:
-* Two 12-core processors
-* Six Enterprise SATA disks
-* 24GB of RAM
-* Two bonded gigabit NICs
+- Two 12-core processors
+- Six Enterprise SATA disks
+- 24GB of RAM
+- Two bonded gigabit NICs
-.Network:
-* 10 Gigabit top-of-rack switches
-* 20 Gigabit bonded interconnects between racks.
+Network:
-==== Hypotheses
+- 10 Gigabit top-of-rack switches
+- 20 Gigabit bonded interconnects between racks.
-===== HBase "Hot Spot" Region
+#### Hypotheses
+**HBase "Hot Spot" Region**
+
We hypothesized that we were experiencing a familiar point of pain: a "hot spot" region in an HBase table, where uneven key-space distribution can funnel a huge number of requests to a single HBase region, bombarding the RegionServer process and cause slow response time.
Examination of the HBase Master status page showed that the number of HBase requests to the troubled node was almost zero.
Further, examination of the HBase logs showed that there were no region splits, compactions, or other region transitions in progress.
This effectively ruled out a "hot spot" as the root cause of the observed slowness.
-===== HBase Region With Non-Local Data
-
+**HBase Region With Non-Local Data**
+
Our next hypothesis was that one of the MapReduce tasks was requesting data from HBase that was not local to the DataNode, thus forcing HDFS to request data blocks from other servers over the network.
Examination of the DataNode logs showed that there were very few blocks being requested over the network, indicating that the HBase region was correctly assigned, and that the majority of the necessary data was located on the node.
This ruled out the possibility of non-local data causing a slowdown.
-===== Excessive I/O Wait Due To Swapping Or An Over-Worked Or Failing Hard Disk
-
+**Excessive I/O Wait Due To Swapping Or An Over-Worked Or Failing Hard Disk**
+
After concluding that the Hadoop and HBase were not likely to be the culprits, we moved on to troubleshooting the DataNode's hardware.
Java, by design, will periodically scan its entire memory space to do garbage collection.
If system memory is heavily overcommitted, the Linux kernel may enter a vicious cycle, using up all of its resources swapping Java heap back and forth from disk to RAM as Java tries to run garbage collection.
@@ -88,18 +59,15 @@ This can manifest as high iowait, as running processes wait for reads and writes
Finally, a disk nearing the upper edge of its performance envelope will begin to cause iowait as it informs the kernel that it cannot accept any more data, and the kernel queues incoming data into the dirty write pool in memory.
However, using `vmstat(1)` and `free(1)`, we could see that no swap was being used, and the amount of disk IO was only a few kilobytes per second.
-===== Slowness Due To High Processor Usage
-
+**Slowness Due To High Processor Usage**
+
Next, we checked to see whether the system was performing slowly simply due to very high computational load. `top(1)` showed that the system load was higher than normal, but `vmstat(1)` and `mpstat(1)` showed that the amount of processor being used for actual computation was low.
-===== Network Saturation (The Winner)
-
+**Network Saturation (The Winner)**
+
Since neither the disks nor the processors were being utilized heavily, we moved on to the performance of the network interfaces.
The DataNode had two gigabit ethernet adapters, bonded to form an active-standby interface. `ifconfig(8)` showed some unusual anomalies, namely interface errors, overruns, framing errors.
While not unheard of, these kinds of errors are exceedingly rare on modern hardware which is operating as it should:
-----
-
+```bash
$ /sbin/ifconfig bond0
bond0 Link encap:Ethernet HWaddr 00:00:00:00:00:00
inet addr:10.x.x.x Bcast:10.x.x.255 Mask:255.255.255.0
@@ -108,13 +76,12 @@ RX packets:2990700159 errors:12 dropped:0 overruns:1 frame:6 <--- Look
TX packets:3443518196 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:2416328868676 (2.4 TB) TX bytes:3464991094001 (3.4 TB)
-----
+```
These errors immediately lead us to suspect that one or more of the ethernet interfaces might have negotiated the wrong line speed.
+This was confirmed both by running an ICMP ping from an external host and observing round-trip-time in excess of 700ms, and by running `ethtool(8)` on the members of the bond interface and discovering that the active interface was operating at 100Mb/s, full duplex.
-----
-
+```bash
$ sudo ethtool eth0
Settings for eth0:
Supported ports: [ TP ]
@@ -141,30 +108,27 @@ Supports Wake-on: umbg
Wake-on: g
Current message level: 0x00000003 (3)
Link detected: yes
-----
+```
+In normal operation, the ICMP ping round trip time should be around 20ms, and the interface speed and duplex should read "1000Mb/s" and "Full", respectively.
-==== Resolution
+#### Resolution
After determining that the active ethernet adapter was at the incorrect speed, we used the `ifenslave(8)` command to make the standby interface the active interface, which yielded an immediate improvement in MapReduce performance, and a 10 times improvement in network throughput:
On the next trip to the datacenter, we determined that the line speed issue was ultimately caused by a bad network cable, which was replaced.
-[[casestudies.perf.1]]
-=== Case Study #2 (Performance Research 2012)
+### Case Study \#2 (Performance Research 2012)
Investigation results of a self-described "we're not sure what's wrong, but it seems slow" problem. http://gbif.blogspot.com/2012/03/hbase-performance-evaluation-continued.html
-[[casestudies.perf.2]]
-=== Case Study #3 (Performance Research 2010))
+### Case Study \#3 (Performance Research 2010)
Investigation results of general cluster performance from 2010.
Although this research is on an older version of the codebase, this writeup is still very useful in terms of approach. https://web.archive.org/web/20180503124332/http://hstack.org/hbase-performance-testing/
-[[casestudies.max.transfer.threads]]
-=== Case Study #4 (max.transfer.threads Config)
+### Case Study #4 (max.transfer.threads Config)
Case study of configuring `max.transfer.threads` (previously known as `xcievers`) and diagnosing errors from misconfigurations. http://www.larsgeorge.com/2012/03/hadoop-hbase-and-xceivers.html
-See also <>.
+See also [`dfs.datanode.max.transfer.threads`](/docs/configuration/basic-prerequisites#dfsdatanodemaxtransferthreads).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/community.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/community.mdx
new file mode 100644
index 000000000000..afdc27e91ab1
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/community.mdx
@@ -0,0 +1,67 @@
+---
+title: "Community"
+description: "HBase community decisions, release managers, JIRA policies, commit message format, and guidelines for feature branches."
+---
+
+## Decisions
+
+### Feature Branches
+
+Feature Branches are easy to make. You do not have to be a committer to make one. Just request that the name of your branch be added to JIRA on the developer's mailing list and a committer will add it for you. Thereafter you can file issues against your feature branch in Apache HBase JIRA. Keep your code elsewhere; it should be public so it can be observed, and you can update the dev mailing list on progress. When the feature is ready for commit, 3 +1s from committers will get your feature merged. See [HBase, mail # dev - Thoughts about large feature dev branches](https://lists.apache.org/thread.html/200513c7e7e4df23c8b9134eeee009d61205c79314e77f222d396006%401346870308%40%3Cdev.hbase.apache.org%3E)
+
+### How to set fix version in JIRA on issue resolve
+
+Here is how we agreed to set versions in JIRA when we resolve an issue. If master is going to be 3.0.0, branch-2 will be 2.4.0, and branch-1 will be 1.7.0 then:
+
+- Commit only to master (i.e., backward-incompatible new feature): Mark with 3.0.0
+- Commit only to master and branch-2 (i.e., backward-compatible new feature, applicable only to 2.x+): Mark with 3.0.0 and 2.4.0
+- Commit to master, branch-2, and branch-1 (i.e., backward-compatible new feature, applicable everywhere): Mark with 3.0.0, 2.4.0, and 1.7.0
+- Commit to master, branch-2, and branch-2.3, branch-1, branch-1.4 (i.e., bug fix applicable to all active release lines): Mark with 3.0.0, 2.4.0, 2.3.x, 1.7.0, and 1.4.x
+- Commit a fix to the website: no version
+
+### Policy on when to set a RESOLVED JIRA as CLOSED
+
+We agreed that for issues that list multiple releases in their _Fix Version/s_ field, CLOSE the issue on the release of any of the versions listed; subsequent change to the issue must happen in a new JIRA.
+
+### Only transient state in ZooKeeper!
+
+You should be able to kill the data in zookeeper and hbase should ride over it, recreating the zk content as it goes. This is an old adage around these parts. We just made note of it now. We are also currently in violation of this basic tenet (replication, at least, keeps permanent state in zk), but we are working to undo this breaking of a golden rule.
+
+## Community Roles
+
+### Release Managers
+
+Each maintained release branch has a release manager, who volunteers to coordinate which new features and bug fixes are backported to that release. The release managers are [committers](/team). If you would like your feature or bug fix to be included in a given release, communicate with that release manager. If this list goes out of date or you can't reach the listed person, reach out to someone else on the list.
+
+**Release Managers:**
+
+| Release | Release Manager | Latest Release | EOL |
+| ------- | ----------------- | ------------------------------------- | -------------- |
+| 0.94 | Lars Hofhansl | 0.94.27 | April 2017 |
+| 0.96 | Michael Stack | 0.96.2 | September 2014 |
+| 0.98 | Andrew Purtell | 0.98.24 | April 2017 |
+| 1.0 | Enis Soztutar | 1.0.3 | January 2016 |
+| 1.1 | Nick Dimiduk | 1.1.13 | December 2017 |
+| 1.2 | Sean Busbey | 1.2.12 | June 2019 |
+| 1.3 | Mikhail Antonov | 1.3.6 | August 2020 |
+| 1.4 | Andrew Purtell | 1.4.14 | October 2021 |
+| 1.5 | Andrew Purtell | 1.5.0 | October 2019 |
+| 1.6 | Andrew Purtell | 1.6.0 | February 2020 |
+| 1.7 | Reid Chan | 1.7.2 | August 2022 |
+| 2.0 | Michael Stack | 2.0.6 | September 2019 |
+| 2.1 | Duo Zhang | 2.1.10 | May 2020 |
+| 2.2 | Guanghao Zhang | 2.2.7 | April 2021 |
+| 2.3 | Nick Dimiduk | 2.3.7 | October 2021 |
+| 2.4 | Andrew Purtell | 2.4.18 | June 2024 |
+| 2.5 | Andrew Purtell | Check the [download](/downloads) page | **NOT YET** |
+| 2.6 | Bryan Beaudreault | Check the [download](/downloads) page | **NOT YET** |
+
+## Commit Message Format
+
+We agreed to the following Git commit message format:
+
+```text
+HBASE-xxxxx <title>. (<contributor>)
+```
+
+If the person making the commit is the contributor, leave off the `(<contributor>)` element.
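+
+For illustration, a commit of a contributor's patch by a committer might look like the following (the issue number, title, and contributor name here are hypothetical):
+
+```text
+HBASE-12345 Fix typo in reference guide compression appendix. (Jane Doe)
+```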
diff --git a/src/main/asciidoc/_chapters/compression.adoc b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/compression.mdx
similarity index 52%
rename from src/main/asciidoc/_chapters/compression.adoc
rename to hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/compression.mdx
index dc61f782c973..531f229b2487 100644
--- a/src/main/asciidoc/_chapters/compression.adoc
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/compression.mdx
@@ -1,35 +1,13 @@
-////
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-////
-
-[appendix]
-[[compression]]
-== Compression and Data Block Encoding In HBase(((Compression,Data BlockEncoding)))
-:doctype: book
-:numbered:
-:toc: left
-:icons: font
-:experimental:
-
-NOTE: Codecs mentioned in this section are for encoding and decoding data blocks or row keys.
-For information about replication codecs, see <>.
+---
+title: "Compression and Data Block Encoding In HBase"
+description: "Comprehensive guide to compression algorithms and data block encoding options in HBase for optimizing storage and performance."
+---
+
+
+ Codecs mentioned in this section are for encoding and decoding data blocks or row keys. For
+ information about replication codecs, see
+ [cluster.replication.preserving.tags](/docs/operational-management/cluster-replication#life-of-a-wal-edit).
+
HBase supports several different compression algorithms which can be enabled on a ColumnFamily.
Data block encoding attempts to limit duplication of information in keys, taking advantage of some of the fundamental designs and patterns of HBase, such as sorted row keys and the schema of a given table.
@@ -37,194 +15,186 @@ Compressors reduce the size of large, opaque byte arrays in cells, and can signi
Compressors and data block encoding can be used together on the same ColumnFamily.
-.Changes Take Effect Upon Compaction
+## Changes Take Effect Upon Compaction
+
If you change compression or encoding for a ColumnFamily, the changes take effect during compaction.
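+
+For example, a minimal HBase shell sketch of applying a codec change right away (the table and family names are hypothetical):
+
+```ruby
+# switch family 'cf' of table 'test' to a new codec, then rewrite existing StoreFiles
+hbase> alter 'test', {NAME => 'cf', COMPRESSION => 'ZSTD'}
+hbase> major_compact 'test'
+```
+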
Some codecs take advantage of capabilities built into Java, such as GZip compression.
Others rely on native libraries. Native libraries may be available via codec dependencies installed into
HBase's library directory, or, if you are utilizing Hadoop codecs, as part of Hadoop. Hadoop codecs
typically have a native code component so follow instructions for installing Hadoop native binary
-support at <>.
+support at [Making use of Hadoop Native Libraries in HBase](/docs/compression#making-use-of-hadoop-native-libraries-in-hbase).
This section discusses common codecs that are used and tested with HBase.
No matter what codec you use, be sure to test that it is installed correctly and is available on all nodes in your cluster.
Extra operational steps may be necessary to be sure that codecs are available on newly-deployed nodes.
-You can use the <> utility to check that a given codec is correctly installed.
-
-To configure HBase to use a compressor, see <>.
-To enable a compressor for a ColumnFamily, see <>.
-To enable data block encoding for a ColumnFamily, see <>.
-
-.Block Compressors
-* NONE
-+
-This compression type constant selects no compression, and is the default.
-* BROTLI
-+
-https://en.wikipedia.org/wiki/Brotli[Brotli] is a generic-purpose lossless compression algorithm
-that compresses data using a combination of a modern variant of the LZ77 algorithm, Huffman
-coding, and 2nd order context modeling, with a compression ratio comparable to the best currently
-available general-purpose compression methods. It is similar in speed with GZ but offers more
-dense compression.
-* BZIP2
-+
-https://en.wikipedia.org/wiki/Bzip2[Bzip2] compresses files using the Burrows-Wheeler block
-sorting text compression algorithm and Huffman coding. Compression is generally considerably
-better than that achieved by the dictionary- (LZ-) based compressors, but both compression and
-decompression can be slow in comparison to other options.
-* GZ
-+
-gzip is based on the https://en.wikipedia.org/wiki/Deflate[DEFLATE] algorithm, which is a
-combination of LZ77 and Huffman coding. It is universally available in the Java Runtime
-Environment so is a good lowest common denominator option. However in comparison to more modern
-algorithms like Zstandard it is quite slow.
-* LZ4
-+
-https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)[LZ4] is a lossless data compression
-algorithm that is focused on compression and decompression speed. It belongs to the LZ77 family
-of compression algorithms, like Brotli, DEFLATE, Zstandard, and others. In our microbenchmarks
-LZ4 is the fastest option for both compression and decompression in that family, and is our
-universally recommended option.
-* LZMA
-+
-https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm[LZMA] is a
-dictionary compression scheme somewhat similar to the LZ77 algorithm that achieves very high
-compression ratios with a computationally expensive predictive model and variable size
-compression dictionary, while still maintaining decompression speed similar to other commonly used
-compression algorithms. LZMA is superior to all other options in general compression ratio but as
-a compressor it can be extremely slow, especially when configured to operate at higher levels of
-compression.
-* LZO
-+
-https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Oberhumer[LZO] is another LZ-variant
-data compression algorithm, with an implementation focused on decompression speed. It is almost
-but not quite as fast as LZ4.
-* SNAPPY
-+
-https://en.wikipedia.org/wiki/Snappy_(compression)[Snappy] is based on ideas from LZ77 but is
-optimized for very high compression speed, achieving only a "reasonable" compression in trade.
-It is as fast as LZ4 but does not compress quite as well. We offer a pure Java Snappy codec
-that can be used instead of GZ as the universally available option for any Java runtime on any
-hardware architecture.
-* ZSTD
-+
-https://en.wikipedia.org/wiki/Zstd[Zstandard] combines a dictionary-matching stage (LZ77) with
-a large search window and a fast entropy coding stage, using both Finite State Entropy and
-Huffman coding. Compression speed can vary by a factor of 20 or more between the fastest and
-slowest levels, while decompression is uniformly fast, varying by less than 20% between the
-fastest and slowest levels.
-+
-ZStandard is the most flexible of the available compression codec options, offering a compression
-ratio similar to LZ4 at level 1 (but with slightly less performance), compression ratios
-comparable to DEFLATE at mid levels (but with better performance), and LZMA-alike dense
-compression (and LZMA-alike compression speeds) at high levels; while providing universally fast
-decompression.
-
-.Data Block Encoding Types
-Prefix::
- Often, keys are very similar. Specifically, keys often share a common prefix and only differ near the end. For instance, one key might be `RowKey:Family:Qualifier0` and the next key might be `RowKey:Family:Qualifier1`.
- +
-In Prefix encoding, an extra column is added which holds the length of the prefix shared between the current key and the previous key.
-Assuming the first key here is totally different from the key before, its prefix length is 0.
-+
+You can use the [compression.test](/docs/compression#compressiontest) utility to check that a given codec is correctly installed.
+
+To configure HBase to use a compressor, see [compressor.install](/docs/compression#compressor-configuration-installation-and-use).
+To enable a compressor for a ColumnFamily, see [changing.compression](/docs/compression#enable-compression-on-a-columnfamily).
+To enable data block encoding for a ColumnFamily, see [data.block.encoding.enable](/docs/compression#enable-data-block-encoding).
+
+## Block Compressors
+
+- **NONE**
+ This compression type constant selects no compression, and is the default.
+- **BROTLI**
+ [Brotli](https://en.wikipedia.org/wiki/Brotli) is a generic-purpose lossless compression algorithm
+ that compresses data using a combination of a modern variant of the LZ77 algorithm, Huffman
+ coding, and 2nd order context modeling, with a compression ratio comparable to the best currently
+ available general-purpose compression methods. It is similar in speed with GZ but offers more
+ dense compression.
+- **BZIP2**
+ [Bzip2](https://en.wikipedia.org/wiki/Bzip2) compresses files using the Burrows-Wheeler block
+ sorting text compression algorithm and Huffman coding. Compression is generally considerably
+ better than that achieved by the dictionary- (LZ-) based compressors, but both compression and
+ decompression can be slow in comparison to other options.
+- **GZ**
+ gzip is based on the [DEFLATE](https://en.wikipedia.org/wiki/Deflate) algorithm, which is a
+ combination of LZ77 and Huffman coding. It is universally available in the Java Runtime
+ Environment so is a good lowest common denominator option. However in comparison to more modern
+ algorithms like Zstandard it is quite slow.
+- **LZ4**
+  [LZ4](https://en.wikipedia.org/wiki/LZ4_%28compression_algorithm%29) is a lossless data compression
+ algorithm that is focused on compression and decompression speed. It belongs to the LZ77 family
+ of compression algorithms, like Brotli, DEFLATE, Zstandard, and others. In our microbenchmarks
+ LZ4 is the fastest option for both compression and decompression in that family, and is our
+ universally recommended option.
+- **LZMA**
+ [LZMA](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Markov_chain_algorithm) is a
+ dictionary compression scheme somewhat similar to the LZ77 algorithm that achieves very high
+ compression ratios with a computationally expensive predictive model and variable size
+ compression dictionary, while still maintaining decompression speed similar to other commonly used
+ compression algorithms. LZMA is superior to all other options in general compression ratio but as
+ a compressor it can be extremely slow, especially when configured to operate at higher levels of
+ compression.
+- **LZO**
+ [LZO](https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Oberhumer) is another LZ-variant
+ data compression algorithm, with an implementation focused on decompression speed. It is almost
+ but not quite as fast as LZ4.
+- **SNAPPY**
+  [Snappy](https://en.wikipedia.org/wiki/Snappy_%28compression%29) is based on ideas from LZ77 but is
+ optimized for very high compression speed, achieving only a "reasonable" compression in trade.
+ It is as fast as LZ4 but does not compress quite as well. We offer a pure Java Snappy codec
+ that can be used instead of GZ as the universally available option for any Java runtime on any
+ hardware architecture.
+- **ZSTD**
+ [Zstandard](https://en.wikipedia.org/wiki/Zstd) combines a dictionary-matching stage (LZ77) with
+ a large search window and a fast entropy coding stage, using both Finite State Entropy and
+ Huffman coding. Compression speed can vary by a factor of 20 or more between the fastest and
+ slowest levels, while decompression is uniformly fast, varying by less than 20% between the
+ fastest and slowest levels.
+ ZStandard is the most flexible of the available compression codec options, offering a compression
+ ratio similar to LZ4 at level 1 (but with slightly less performance), compression ratios
+ comparable to DEFLATE at mid levels (but with better performance), and LZMA-alike dense
+ compression (and LZMA-alike compression speeds) at high levels; while providing universally fast
+ decompression.
+
+## Data Block Encoding Types
+
+### Prefix
+
+Often, keys are very similar. Specifically, keys often share a common prefix and only differ near the end. For instance, one key might be `RowKey:Family:Qualifier0` and the next key might be `RowKey:Family:Qualifier1`.
+In Prefix encoding, an extra column is added which holds the length of the prefix shared between the current key and the previous key. Assuming the first key here is totally different from the key before, its prefix length is 0.
+
The second key's prefix length is `23`, since they have the first 23 characters in common.
-+
+
Obviously if the keys tend to have nothing in common, Prefix will not provide much benefit.
-+
+
The following image shows a hypothetical ColumnFamily with no data block encoding.
-+
-.ColumnFamily with No Encoding
-image::data_block_no_encoding.png[]
-+
+
+
+
Here is the same data with prefix data encoding.
-+
-.ColumnFamily with Prefix Encoding
-image::data_block_prefix_encoding.png[]
-
-Diff::
- Diff encoding expands upon Prefix encoding.
- Instead of considering the key sequentially as a monolithic series of bytes, each key field is split so that each part of the key can be compressed more efficiently.
-+
+
+
+
+### Diff
+
+Diff encoding expands upon Prefix encoding.
+Instead of considering the key sequentially as a monolithic series of bytes, each key field is split so that each part of the key can be compressed more efficiently.
+
Two new fields are added: timestamp and type.
-+
+
If the ColumnFamily is the same as the previous row, it is omitted from the current row.
-+
+
If the key length, value length or type are the same as the previous row, the field is omitted.
-+
+
In addition, for increased compression, the timestamp is stored as a Diff from the previous row's timestamp, rather than being stored in full.
Given the two row keys in the Prefix example, and given an exact match on timestamp and the same type, neither the value length, or type needs to be stored for the second row, and the timestamp value for the second row is just 0, rather than a full timestamp.
-+
+
Diff encoding is disabled by default because writing and scanning are slower but more data is cached.
-+
+
This image shows the same ColumnFamily from the previous images, with Diff encoding.
-+
-.ColumnFamily with Diff Encoding
-image::data_block_diff_encoding.png[]
-Fast Diff::
- Fast Diff works similar to Diff, but uses a faster implementation. It also adds another field which stores a single bit to track whether the data itself is the same as the previous row. If it is, the data is not stored again.
-+
+
+
+### Fast Diff
+
+Fast Diff works similar to Diff, but uses a faster implementation. It also adds another field which stores a single bit to track whether the data itself is the same as the previous row. If it is, the data is not stored again.
+
Fast Diff is the recommended codec to use if you have long keys or many columns.
-+
+
The data format is nearly identical to Diff encoding, so there is not an image to illustrate it.
+### Prefix Tree
-Prefix Tree::
- Prefix tree encoding was introduced as an experimental feature in HBase 0.96.
- It provides similar memory savings to the Prefix, Diff, and Fast Diff encoder, but provides faster random access at a cost of slower encoding speed.
- It was removed in hbase-2.0.0. It was a good idea but little uptake. If interested in reviving this effort, write the hbase dev list.
+Prefix tree encoding was introduced as an experimental feature in HBase 0.96.
+It provides similar memory savings to the Prefix, Diff, and Fast Diff encoder, but provides faster random access at a cost of slower encoding speed.
+It was removed in hbase-2.0.0. It was a good idea but saw little uptake. If you are interested in reviving this effort, write to the hbase dev list.
-[[data.block.encoding.types]]
-=== Which Compressor or Data Block Encoder To Use
+## Which Compressor or Data Block Encoder To Use
The compression or codec type to use depends on the characteristics of your data. Choosing the wrong type could cause your data to take more space rather than less, and can have performance implications.
-In general, you need to weigh your options between smaller size and faster compression/decompression. Following are some general guidelines, expanded from a discussion at link:https://lists.apache.org/thread.html/481e67a61163efaaf4345510447a9244871a8d428244868345a155ff%401378926618%40%3Cdev.hbase.apache.org%3E[Documenting Guidance on compression and codecs].
+In general, you need to weigh your options between smaller size and faster compression/decompression. Following are some general guidelines, expanded from a discussion at [Documenting Guidance on compression and codecs](https://lists.apache.org/thread.html/481e67a61163efaaf4345510447a9244871a8d428244868345a155ff%401378926618%40%3Cdev.hbase.apache.org%3E).
-* In most cases, enabling LZ4 or Snappy by default is a good choice, because they have a low
+- In most cases, enabling LZ4 or Snappy by default is a good choice, because they have a low
performance overhead and provide reasonable space savings. A fast compression algorithm almost
always improves overall system performance by trading some increased CPU usage for better I/O
efficiency.
-* If the values are large (and not pre-compressed, such as images), use a data block compressor.
-* For [firstterm]_cold data_, which is accessed infrequently, depending on your use case, it might
+- If the values are large (and not pre-compressed, such as images), use a data block compressor.
+- For _cold data_, which is accessed infrequently, depending on your use case, it might
make sense to opt for Zstandard at its higher compression levels, or LZMA, especially for high
entropy binary data, or Brotli for data similar in characteristics to web data. Bzip2 might also
be a reasonable option but Zstandard is very likely to offer superior decompression speed.
-* For [firstterm]_hot data_, which is accessed frequently, you almost certainly want only LZ4,
+- For _hot data_, which is accessed frequently, you almost certainly want only LZ4,
Snappy, LZO, or Zstandard at a low compression level. These options will not provide as high of
a compression ratio but will in trade not unduly impact system performance.
-* If you have long keys (compared to the values) or many columns, use a prefix encoder.
+- If you have long keys (compared to the values) or many columns, use a prefix encoder.
FAST_DIFF is recommended.
-* If enabling WAL value compression, consider LZ4 or SNAPPY compression, or Zstandard at
+- If enabling WAL value compression, consider LZ4 or SNAPPY compression, or Zstandard at
level 1. Reading and writing the WAL is performance critical. That said, the I/O
savings of these compression options can improve overall system performance.
-[[hadoop.native.lib]]
-=== Making use of Hadoop Native Libraries in HBase
+## Making use of Hadoop Native Libraries in HBase
-The Hadoop shared library has a bunch of facility including compression libraries and fast crc'ing -- hardware crc'ing if your chipset supports it.
+The Hadoop shared library provides a number of facilities, including compression libraries and fast CRC'ing (hardware CRC'ing if your chipset supports it).
To make this facility available to HBase, do the following. HBase/Hadoop will fall back to use alternatives if it cannot find the native library
-versions -- or fail outright if you asking for an explicit compressor and there is no alternative available.
+versions, or fail outright if you are asking for an explicit compressor and there is no alternative available.
First, make sure your Hadoop is set up correctly. Fix this message if you see it when starting Hadoop processes:
-----
+
+```
16/02/09 22:40:24 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
-----
+```
+
It means Hadoop is not properly pointing at its native libraries, or the native libs were compiled for another platform.
Fix this first.
Then if you see the following in your HBase logs, you know that HBase was unable to locate the Hadoop native libraries:
-[source]
-----
+
+```
2014-08-07 09:26:20,139 WARN [main] util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
-----
+```
+
If the libraries loaded successfully, the WARN message does not show. Usually this means you are good to go but read on.
Let's presume your Hadoop shipped with a native library that suits the platform you are running HBase on.
-To check if the Hadoop native library is available to HBase, run the following tool (available in Hadoop 2.1 and greater):
-[source]
-----
+To check if the Hadoop native library is available to HBase, run the following tool (available in Hadoop 2.1 and greater):
+
+```bash
$ ./bin/hbase --config ~/conf_hbase org.apache.hadoop.util.NativeLibraryChecker
2014-08-26 13:15:38,717 WARN [main] util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Native library checking:
@@ -234,37 +204,40 @@ snappy: false
lz4: false
bzip2: false
2014-08-26 13:15:38,863 INFO [main] util.ExitUtil: Exiting with status 1
-----
+```
+
The above shows that the native Hadoop library is not available in the HBase context.
The above NativeLibraryChecker tool may come back saying all is hunky-dory
--- i.e. all libs show 'true', that they are available -- but follow the below
+(i.e. all libs show 'true', that they are available) but follow the below
prescription anyway to ensure the native libs are available in HBase context,
when it goes to use them.
To fix the above, either copy the Hadoop native libraries locally or symlink to them if the Hadoop and HBase installs are adjacent in the filesystem.
You could also point at their location by setting the `LD_LIBRARY_PATH` environment variable in your hbase-env.sh.
-Where the JVM looks to find native libraries is "system dependent" (See `java.lang.System#loadLibrary(name)`). On linux, by default, is going to look in _lib/native/PLATFORM_ where `PLATFORM` is the label for the platform your HBase is installed on.
+Where the JVM looks to find native libraries is "system dependent" (see `java.lang.System#loadLibrary(name)`). On Linux, by default, it is going to look in _lib/native/PLATFORM_ where `PLATFORM` is the label for the platform your HBase is installed on.
On a local linux machine, it seems to be the concatenation of the java properties `os.name` and `os.arch` followed by whether 32 or 64 bit.
HBase on startup prints out all of the java system properties so find the os.name and os.arch in the log.
For example:
-[source]
-----
+
+```
...
2014-08-06 15:27:22,853 INFO [main] zookeeper.ZooKeeper: Client environment:os.name=Linux
2014-08-06 15:27:22,853 INFO [main] zookeeper.ZooKeeper: Client environment:os.arch=amd64
...
-----
+```
+
So in this case, the PLATFORM string is `Linux-amd64-64`.
-Copying the Hadoop native libraries or symlinking at _lib/native/Linux-amd64-64_ will ensure they are found.
+Copying the Hadoop native libraries or symlinking at _lib/native/Linux-amd64-64_ will ensure they are found.
Perform a rolling restart after you have made this change.
Here is an example of how you would set up the symlinks.
Let the hadoop and hbase installs be in your home directory. Assume your hadoop native libs
are at ~/hadoop/lib/native. Assume you are on a Linux-amd64-64 platform. In this case,
you would do the following to link the hadoop native lib so hbase could find them.
-----
+
+```bash
...
$ mkdir -p ~/hbase/lib/native
$ cd ~/hbase/lib/native/
@@ -272,19 +245,21 @@ $ ln -s ~/hadoop/lib/native Linux-amd64-64
$ ls -la
# Linux-amd64-64 -> /home/USER/hadoop/lib/native
...
-----
+```
If you see PureJavaCrc32C in a stack trace or if you see something like the below in a perf trace, then native is not working; you are using the java CRC functions rather than native:
-----
+
+```
5.02% perf-53601.map [.] Lorg/apache/hadoop/util/PureJavaCrc32C;.update
-----
-See link:https://issues.apache.org/jira/browse/HBASE-11927[HBASE-11927 Use Native Hadoop Library for HFile checksum (And flip default from CRC32 to CRC32C)],
+```
+
+See [HBASE-11927 Use Native Hadoop Library for HFile checksum (And flip default from CRC32 to CRC32C)](https://issues.apache.org/jira/browse/HBASE-11927),
for more on native checksumming support. See in particular the release note for how to check your hardware to see if your processor has support for hardware CRCs.
-Or checkout the Apache link:https://blogs.apache.org/hbase/entry/saving_cpu_using_native_hadoop[Checksums in HBase] blog post.
+Or check out the Apache [Checksums in HBase](https://blogs.apache.org/hbase/entry/saving_cpu_using_native_hadoop) blog post.
+
+Here is an example of how to point at the Hadoop libs with the `LD_LIBRARY_PATH` environment variable:
-Here is example of how to point at the Hadoop libs with `LD_LIBRARY_PATH` environment variable:
-[source]
-----
+```bash
$ LD_LIBRARY_PATH=~/hadoop-2.5.0-SNAPSHOT/lib/native ./bin/hbase --config ~/conf_hbase org.apache.hadoop.util.NativeLibraryChecker
2014-08-26 13:42:49,332 INFO [main] bzip2.Bzip2Factory: Successfully loaded & initialized native-bzip2 library system-native
2014-08-26 13:42:49,337 INFO [main] zlib.ZlibFactory: Successfully loaded & initialized native-zlib library
@@ -294,13 +269,13 @@ zlib: true /lib64/libz.so.1
snappy: true /usr/lib64/libsnappy.so.1
lz4: true revision:99
bzip2: true /lib64/libbz2.so.1
-----
+```
+
Set the `LD_LIBRARY_PATH` environment variable in _hbase-env.sh_ when starting your HBase.
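+
+A minimal sketch of such an entry (the path shown is only an example; point it at wherever your Hadoop native libraries actually live):
+
+```bash
+# hbase-env.sh
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/hadoop/lib/native"
+```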
-=== Compressor Configuration, Installation, and Use
+## Compressor Configuration, Installation, and Use
-[[compressor.install]]
-==== Configure HBase For Compressors
+### Configure HBase For Compressors
Compression codecs are provided either by HBase compressor modules or by Hadoop's native compression
support. As described above you choose a compression type in table or column family schema or in
@@ -308,71 +283,51 @@ site configuration using its short label, e.g. _snappy_ for Snappy, or _zstd_ fo
codec implementation is dynamically loaded to support what label is configurable by way of site
configuration.
-[options="header"]
-|===
-|Algorithm label|Codec implementation configuration key|Default value
-//----------------------
-|BROTLI|hbase.io.compress.brotli.codec|org.apache.hadoop.hbase.io.compress.brotli.BrotliCodec
-|BZIP2|hbase.io.compress.bzip2.codec|org.apache.hadoop.io.compress.BZip2Codec
-|GZ|hbase.io.compress.gz.codec|org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec
-|LZ4|hbase.io.compress.lz4.codec|org.apache.hadoop.io.compress.Lz4Codec
-|LZMA|hbase.io.compress.lzma.codec|org.apache.hadoop.hbase.io.compress.xz.LzmaCodec
-|LZO|hbase.io.compress.lzo.codec|com.hadoop.compression.lzo.LzoCodec
-|SNAPPY|hbase.io.compress.snappy.codec|org.apache.hadoop.io.compress.SnappyCodec
-|ZSTD|hbase.io.compress.zstd.codec|org.apache.hadoop.io.compress.ZStandardCodec
-|===
+| Algorithm label | Codec implementation configuration key | Default value |
+| --------------- | -------------------------------------- | ----------------------------------------------------------- |
+| BROTLI | hbase.io.compress.brotli.codec | org.apache.hadoop.hbase.io.compress.brotli.BrotliCodec |
+| BZIP2 | hbase.io.compress.bzip2.codec | org.apache.hadoop.io.compress.BZip2Codec |
+| GZ | hbase.io.compress.gz.codec | org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec |
+| LZ4 | hbase.io.compress.lz4.codec | org.apache.hadoop.io.compress.Lz4Codec |
+| LZMA | hbase.io.compress.lzma.codec | org.apache.hadoop.hbase.io.compress.xz.LzmaCodec |
+| LZO | hbase.io.compress.lzo.codec | com.hadoop.compression.lzo.LzoCodec |
+| SNAPPY | hbase.io.compress.snappy.codec | org.apache.hadoop.io.compress.SnappyCodec |
+| ZSTD | hbase.io.compress.zstd.codec | org.apache.hadoop.io.compress.ZStandardCodec |
The available codec implementation options are:
-[options="header"]
-|===
-|Label|Codec implementation class|Notes
-//----------------------
-|BROTLI|org.apache.hadoop.hbase.io.compress.brotli.BrotliCodec|
- Implemented with https://github.com/hyperxpro/Brotli4j[Brotli4j]
-|BZIP2|org.apache.hadoop.io.compress.BZip2Codec|Hadoop native codec
-|GZ|org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec|
- Requires the Hadoop native GZ codec
-|LZ4|org.apache.hadoop.io.compress.Lz4Codec|Hadoop native codec
-|LZ4|org.apache.hadoop.hbase.io.compress.aircompressor.Lz4Codec|
- Pure Java implementation
-|LZ4|org.apache.hadoop.hbase.io.compress.lz4.Lz4Codec|
- Implemented with https://github.com/lz4/lz4-java[lz4-java]
-|LZMA|org.apache.hadoop.hbase.io.compress.xz.LzmaCodec|
- Implemented with https://tukaani.org/xz/java.html[XZ For Java]
-|LZO|com.hadoop.compression.lzo.LzoCodec|Hadoop native codec,
- requires GPL licensed native dependencies
-|LZO|org.apache.hadoop.io.compress.LzoCodec|Hadoop native codec,
- requires GPL licensed native dependencies
-|LZO|org.apache.hadoop.hbase.io.compress.aircompressor.LzoCodec|
- Pure Java implementation
-|SNAPPY|org.apache.hadoop.io.compress.SnappyCodec|Hadoop native codec
-|SNAPPY|org.apache.hadoop.hbase.io.compress.aircompressor.SnappyCodec|
- Pure Java implementation
-|SNAPPY|org.apache.hadoop.hbase.io.compress.xerial.SnappyCodec|
- Implemented with https://github.com/xerial/snappy-java[snappy-java]
-|ZSTD|org.apache.hadoop.io.compress.ZStandardCodec|Hadoop native codec
-|ZSTD|org.apache.hadoop.hbase.io.compress.aircompressor.ZstdCodec|
- Pure Java implementation, limited to a fixed compression level,
- not data compatible with the Hadoop zstd codec
-|ZSTD|org.apache.hadoop.hbase.io.compress.zstd.ZstdCodec|
- Implemented with https://github.com/luben/zstd-jni[zstd-jni],
- supports all compression levels, supports custom dictionaries
-|===
+| Label | Codec implementation class | Notes |
+| ------ | ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| BROTLI | org.apache.hadoop.hbase.io.compress.brotli.BrotliCodec | Implemented with [Brotli4j](https://github.com/hyperxpro/Brotli4j) |
+| BZIP2 | org.apache.hadoop.io.compress.BZip2Codec | Hadoop native codec |
+| GZ | org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec | Requires the Hadoop native GZ codec |
+| LZ4 | org.apache.hadoop.io.compress.Lz4Codec | Hadoop native codec |
+| LZ4 | org.apache.hadoop.hbase.io.compress.aircompressor.Lz4Codec | Pure Java implementation |
+| LZ4 | org.apache.hadoop.hbase.io.compress.lz4.Lz4Codec | Implemented with [lz4-java](https://github.com/lz4/lz4-java) |
+| LZMA | org.apache.hadoop.hbase.io.compress.xz.LzmaCodec | Implemented with [XZ For Java](https://tukaani.org/xz/java.html) |
+| LZO | com.hadoop.compression.lzo.LzoCodec | Hadoop native codec, requires GPL licensed native dependencies |
+| LZO | org.apache.hadoop.io.compress.LzoCodec | Hadoop native codec, requires GPL licensed native dependencies |
+| LZO | org.apache.hadoop.hbase.io.compress.aircompressor.LzoCodec | Pure Java implementation |
+| SNAPPY | org.apache.hadoop.io.compress.SnappyCodec | Hadoop native codec |
+| SNAPPY | org.apache.hadoop.hbase.io.compress.aircompressor.SnappyCodec | Pure Java implementation |
+| SNAPPY | org.apache.hadoop.hbase.io.compress.xerial.SnappyCodec | Implemented with [snappy-java](https://github.com/xerial/snappy-java) |
+| ZSTD | org.apache.hadoop.io.compress.ZStandardCodec | Hadoop native codec |
+| ZSTD | org.apache.hadoop.hbase.io.compress.aircompressor.ZstdCodec | Pure Java implementation, limited to a fixed compression level, not data compatible with the Hadoop zstd codec |
+| ZSTD | org.apache.hadoop.hbase.io.compress.zstd.ZstdCodec | Implemented with [zstd-jni](https://github.com/luben/zstd-jni), supports all compression levels, supports custom dictionaries |
Specify which codec implementation option you prefer for a given compression algorithm
in site configuration, like so:
-[source]
-----
+
+```xml
...
<property>
  <name>hbase.io.compress.lz4.codec</name>
  <value>org.apache.hadoop.hbase.io.compress.lz4.Lz4Codec</value>
</property>
...
-----
+```
-.Compressor Microbenchmarks
+#### Compressor Microbenchmarks
See https://github.com/apurtell/jmh-compression-tests
@@ -388,63 +343,58 @@ block encoding and actual persistence.
These are the results:
-[options="header"]
-|===
-|Codec|Level|Time (milliseconds)|Result (bytes)|Improvement
-//----------------------
-|AirCompressor LZ4|-|349.989 ± 2.835|76,999,408|70.17%
-|AirCompressor LZO|-|334.554 ± 3.243|79,369,805|69.25%
-|AirCompressor Snappy|-|364.153 ± 19.718|80,201,763|68.93%
-|AirCompressor Zstandard|3 (effective)|1108.267 ± 8.969|55,129,189|78.64%
-|Brotli|1|593.107 ± 2.376|58,672,319|77.27%
-|Brotli|3|1345.195 ± 27.327|53,917,438|79.11%
-|Brotli|6|2812.411 ± 25.372|48,696,441|81.13%
-|Brotli|10|74615.936 ± 224.854|44,970,710|82.58%
-|LZ4 (lz4-java)|-|303.045 ± 0.783|76,974,364|70.18%
-|LZMA|1|6410.428 ± 115.065|49,948,535|80.65%
-|LZMA|3|8144.620 ± 152.119|49,109,363|80.97%
-|LZMA|6|43802.576 ± 382.025|46,951,810|81.81%
-|LZMA|9|49821.979 ± 580.110|46,951,810|81.81%
-|Snappy (xerial)|-|360.225 ± 2.324|80,749,937|68.72%
-|Zstd (zstd-jni)|1|654.699 ± 16.839|56,719,994|78.03%
-|Zstd (zstd-jni)|3|839.160 ± 24.906|54,573,095|78.86%
-|Zstd (zstd-jni)|5|1594.373 ± 22.384|52,025,485|79.84%
-|Zstd (zstd-jni)|7|2308.705 ± 24.744|50,651,554|80.38%
-|Zstd (zstd-jni)|9|3659.677 ± 58.018|50,208,425|80.55%
-|Zstd (zstd-jni)|12|8705.294 ± 58.080|49,841,446|80.69%
-|Zstd (zstd-jni)|15|19785.646 ± 278.080|48,499,508|81.21%
-|Zstd (zstd-jni)|18|47702.097 ± 442.670|48,319,879|81.28%
-|Zstd (zstd-jni)|22|97799.695 ± 1106.571|48,212,220|81.32%
-|===
-
-.Compressor Support On the Master
+| Codec | Level | Time (milliseconds) | Result (bytes) | Improvement |
+| ----------------------- | ------------- | -------------------- | -------------- | ----------- |
+| AirCompressor LZ4 | - | 349.989 ± 2.835 | 76,999,408 | 70.17% |
+| AirCompressor LZO | - | 334.554 ± 3.243 | 79,369,805 | 69.25% |
+| AirCompressor Snappy | - | 364.153 ± 19.718 | 80,201,763 | 68.93% |
+| AirCompressor Zstandard | 3 (effective) | 1108.267 ± 8.969 | 55,129,189 | 78.64% |
+| Brotli | 1 | 593.107 ± 2.376 | 58,672,319 | 77.27% |
+| Brotli | 3 | 1345.195 ± 27.327 | 53,917,438 | 79.11% |
+| Brotli | 6 | 2812.411 ± 25.372 | 48,696,441 | 81.13% |
+| Brotli | 10 | 74615.936 ± 224.854 | 44,970,710 | 82.58% |
+| LZ4 (lz4-java) | - | 303.045 ± 0.783 | 76,974,364 | 70.18% |
+| LZMA | 1 | 6410.428 ± 115.065 | 49,948,535 | 80.65% |
+| LZMA | 3 | 8144.620 ± 152.119 | 49,109,363 | 80.97% |
+| LZMA | 6 | 43802.576 ± 382.025 | 46,951,810 | 81.81% |
+| LZMA | 9 | 49821.979 ± 580.110 | 46,951,810 | 81.81% |
+| Snappy (xerial) | - | 360.225 ± 2.324 | 80,749,937 | 68.72% |
+| Zstd (zstd-jni) | 1 | 654.699 ± 16.839 | 56,719,994 | 78.03% |
+| Zstd (zstd-jni) | 3 | 839.160 ± 24.906 | 54,573,095 | 78.86% |
+| Zstd (zstd-jni) | 5 | 1594.373 ± 22.384 | 52,025,485 | 79.84% |
+| Zstd (zstd-jni) | 7 | 2308.705 ± 24.744 | 50,651,554 | 80.38% |
+| Zstd (zstd-jni) | 9 | 3659.677 ± 58.018 | 50,208,425 | 80.55% |
+| Zstd (zstd-jni) | 12 | 8705.294 ± 58.080 | 49,841,446 | 80.69% |
+| Zstd (zstd-jni) | 15 | 19785.646 ± 278.080 | 48,499,508 | 81.21% |
+| Zstd (zstd-jni) | 18 | 47702.097 ± 442.670 | 48,319,879 | 81.28% |
+| Zstd (zstd-jni) | 22 | 97799.695 ± 1106.571 | 48,212,220 | 81.32% |
+
+#### Compressor Support On the Master
A new configuration setting was introduced in HBase 0.95, to check the Master to determine which data block encoders are installed and configured on it, and assume that the entire cluster is configured the same.
This option, `hbase.master.check.compression`, defaults to `true`.
-This prevents the situation described in link:https://issues.apache.org/jira/browse/HBASE-6370[HBASE-6370], where a table is created or modified to support a codec that a region server does not support, leading to failures that take a long time to occur and are difficult to debug.
+This prevents the situation described in [HBASE-6370](https://issues.apache.org/jira/browse/HBASE-6370), where a table is created or modified to support a codec that a region server does not support, leading to failures that take a long time to occur and are difficult to debug.
If `hbase.master.check.compression` is enabled, libraries for all desired compressors need to be installed and configured on the Master, even if the Master does not run a region server.
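+
+The option is set in _hbase-site.xml_; here it is shown explicitly at its default value, as a sketch:
+
+```xml
+<property>
+  <name>hbase.master.check.compression</name>
+  <value>true</value>
+</property>
+```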
-.Install GZ Support Via Native Libraries
+#### Install GZ Support Via Native Libraries
HBase uses Java's built-in GZip support unless the native Hadoop libraries are available on the CLASSPATH.
The recommended way to add libraries to the CLASSPATH is to set the environment variable `HBASE_LIBRARY_PATH` for the user running HBase.
If native libraries are not available and Java's GZIP is used, `Got brand-new compressor` reports will be present in the logs.
-See <>).
+See [brand.new.compressor](/docs/troubleshooting#logs-flooded-with-2011-01-10-124048407-info-orgapachehadoopiocompresscodecpool-gotbrand-new-compressor-messages).
-[[lzo.compression]]
-.Install Hadoop Native LZO Support
+#### Install Hadoop Native LZO Support
HBase cannot ship with the Hadoop native LZO codec because of incompatibility between HBase, which uses an Apache Software License (ASL), and LZO, which uses a GPL license.
-See the link:https://github.com/twitter/hadoop-lzo/blob/master/README.md[Hadoop-LZO at Twitter] for information on configuring LZO support for HBase.
+See [Hadoop-LZO at Twitter](https://github.com/twitter/hadoop-lzo/blob/master/README.md) for information on configuring LZO support for HBase.
If you depend upon LZO compression, consider using the pure Java and ASL licensed
AirCompressor LZO codec option instead of the Hadoop native default, or configure your
RegionServers to fail to start if native LZO support is not available.
-See <>.
+See [hbase.regionserver.codecs](/docs/compression#enforce-compression-settings-on-a-regionserver).
-[[lz4.compression]]
-.Configure Hadoop Native LZ4 Support
+#### Configure Hadoop Native LZ4 Support
LZ4 support is bundled with Hadoop and is the default LZ4 codec implementation.
It is not required that you make use of the Hadoop LZ4 codec. Our LZ4 codec implemented
@@ -453,24 +403,25 @@ pure Java option for use where native support is not available.
That said, if you prefer the Hadoop option, make sure the hadoop shared library
(libhadoop.so) is accessible when you start HBase.
-After configuring your platform (see <>), you can
+After configuring your platform (see [hadoop.native.lib](/docs/compression#making-use-of-hadoop-native-libraries-in-hbase)), you can
make a symbolic link from HBase to the native Hadoop libraries. This assumes the two
software installs are colocated. For example, if my 'platform' is Linux-amd64-64:
-[source,bourne]
-----
+
+```bash
$ cd $HBASE_HOME
$ mkdir lib/native
$ ln -s $HADOOP_HOME/lib/native lib/native/Linux-amd64-64
-----
+```
+
Use the compression tool to check that LZ4 is installed on all nodes.
Start up (or restart) HBase.
Afterward, you can create and alter tables to enable LZ4 as a compression codec:
-----
+
+```ruby
hbase(main):003:0> alter 'TestTable', {NAME => 'info', COMPRESSION => 'LZ4'}
-----
+```
-[[snappy.compression.installation]]
-.Install Hadoop native Snappy Support
+#### Install Hadoop native Snappy Support
Snappy support is bundled with Hadoop and is the default Snappy codec implementation.
It is not required that you make use of the Hadoop Snappy codec. Our Snappy codec
@@ -485,54 +436,52 @@ If you built from source, copy the shared library to a known location on your sy
In addition to the Snappy library, HBase also needs access to the Hadoop shared library, which will be called something like _libhadoop.so.X.Y_, where X and Y are both numbers.
Make note of the location of the Hadoop library, or copy it to the same location as the Snappy library.
-[NOTE]
-====
+
The Snappy and Hadoop libraries need to be available on each node of your cluster.
-See <> to find out how to test that this is the case.
+See [compression.test](/docs/compression#compressiontest) to find out how to test that this is the case.
+
+See [hbase.regionserver.codecs](/docs/compression#enforce-compression-settings-on-a-regionserver) to configure your RegionServers to fail to start if a given compressor is not available.
-See <> to configure your RegionServers to fail to start if a given compressor is not available.
-====
+
Each of these library locations needs to be added to the environment variable `HBASE_LIBRARY_PATH` for the operating system user that runs HBase.
You need to restart the RegionServer for the changes to take effect.
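+
+For instance, a sketch of an _hbase-env.sh_ entry (the directories are placeholders for wherever you copied the Snappy and Hadoop shared libraries):
+
+```bash
+# hbase-env.sh
+export HBASE_LIBRARY_PATH="/opt/native/snappy:/opt/native/hadoop:$HBASE_LIBRARY_PATH"
+```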
-[[compression.test]]
-.CompressionTest
+#### CompressionTest
You can use the CompressionTest tool to verify that your compressor is available to HBase:
-----
-
+```bash
$ hbase org.apache.hadoop.hbase.util.CompressionTest hdfs://host/path/to/hbase snappy
-----
+```
-[[hbase.regionserver.codecs]]
-.Enforce Compression Settings On a RegionServer
+#### Enforce Compression Settings On a RegionServer
You can configure a RegionServer so that it will fail to restart if compression is configured incorrectly, by adding the option hbase.regionserver.codecs to the _hbase-site.xml_, and setting its value to a comma-separated list of codecs that need to be available.
For example, if you set this property to `lzo,gz`, the RegionServer would fail to start if both compressors were not available.
This would prevent a new server from being added to the cluster without having codecs configured properly.
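+
+A sketch of that example as it would appear in _hbase-site.xml_:
+
+```xml
+<property>
+  <name>hbase.regionserver.codecs</name>
+  <value>lzo,gz</value>
+</property>
+```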
-[[changing.compression]]
-==== Enable Compression On a ColumnFamily
+### Enable Compression On a ColumnFamily
To enable compression for a ColumnFamily, use an `alter` command.
You do not need to re-create the table or copy data.
If you are changing codecs, be sure the old codec is still available until all the old StoreFiles have been compacted.
-.Enabling Compression on a ColumnFamily of an Existing Table using HBaseShell
-----
+#### Enabling Compression on a ColumnFamily of an Existing Table using HBase Shell
+
+```ruby
hbase> alter 'test', {NAME => 'cf', COMPRESSION => 'GZ'}
-----
+```
+
+#### Creating a New Table with Compression On a ColumnFamily
-.Creating a New Table with Compression On a ColumnFamily
-----
+```ruby
hbase> create 'test2', { NAME => 'cf2', COMPRESSION => 'SNAPPY' }
-----
+```
-.Verifying a ColumnFamily's Compression Settings
-----
+#### Verifying a ColumnFamily's Compression Settings
+```ruby
hbase> describe 'test'
DESCRIPTION ENABLED
'test', {NAME => 'cf', DATA_BLOCK_ENCODING => 'NONE false
@@ -542,15 +491,16 @@ DESCRIPTION ENABLED
lse', BLOCKSIZE => '65536', IN_MEMORY => 'false', B
LOCKCACHE => 'true'}
1 row(s) in 0.1070 seconds
-----
+```
-==== Testing Compression Performance
+### Testing Compression Performance
HBase includes a tool called LoadTestTool which provides mechanisms to test your compression performance.
You must specify either `-write` or `-update-read` as your first parameter, and if you do not specify another parameter, usage advice is printed for each option.
-.+LoadTestTool+ Usage
-----
+**`LoadTestTool` Usage**
+
+```bash
$ bin/hbase org.apache.hadoop.hbase.util.LoadTestTool -h
usage: bin/hbase org.apache.hadoop.hbase.util.LoadTestTool
Options:
@@ -587,7 +537,7 @@ Options:
parallely. -tn parameter value becomes table name
prefix. Each table name is in format
_1..._n
- -read [:<#threads=20>]
+ -read [:<#threads=20>]
-regions_per_server A positive integer number. When a number n is
specified, load test tool will create the test
table with n regions per server
@@ -596,40 +546,42 @@ Options:
-start_key The first key to read/write (a 0-based index). The
default value is 0.
-tn The name of the table to read or write
- -update [:<#threads=20>][:<#whether to
- ignore nonce collisions=0>]
- -write :[:<#threads=20>]
+ -update [:<#threads=20>][:<#whether to
+ ignore nonce collisions=0>]
+ -write :[:<#threads=20>]
-zk ZK quorum as comma-separated host names without
port numbers
-zk_root name of parent znode in zookeeper
-----
+```
-.Example Usage of LoadTestTool
-----
-$ hbase org.apache.hadoop.hbase.util.LoadTestTool -write 1:10:100 -num_keys 1000000
- -read 100:30 -num_tables 1 -data_block_encoding NONE -tn load_test_tool_NONE
-----
+#### Example Usage of LoadTestTool
-[[data.block.encoding.enable]]
-=== Enable Data Block Encoding
+```bash
+$ hbase org.apache.hadoop.hbase.util.LoadTestTool -write 1:10:100 -num_keys 1000000 \
+ -read 100:30 -num_tables 1 -data_block_encoding NONE -tn load_test_tool_NONE
+```
+
+## Enable Data Block Encoding
Codecs are built into HBase so no extra configuration is needed.
Codecs are enabled on a table by setting the `DATA_BLOCK_ENCODING` property.
Disable the table before altering its DATA_BLOCK_ENCODING setting.
Following is an example using HBase Shell:
-.Enable Data Block Encoding On a Table
-----
+#### Enable Data Block Encoding On a Table
+
+```ruby
hbase> alter 'test', { NAME => 'cf', DATA_BLOCK_ENCODING => 'FAST_DIFF' }
Updating all regions with the new schema...
0/1 regions updated.
1/1 regions updated.
Done.
0 row(s) in 2.2820 seconds
-----
+```
+
+#### Verifying a ColumnFamily's Data Block Encoding
-.Verifying a ColumnFamily's Data Block Encoding
-----
+```ruby
hbase> describe 'test'
DESCRIPTION ENABLED
'test', {NAME => 'cf', DATA_BLOCK_ENCODING => 'FAST true
@@ -639,12 +591,4 @@ DESCRIPTION ENABLED
> 'false', BLOCKSIZE => '65536', IN_MEMORY => 'fals
e', BLOCKCACHE => 'true'}
1 row(s) in 0.0650 seconds
-----
-
-:numbered:
-
-ifdef::backend-docbook[]
-[index]
-== Index
-// Generated automatically by the DocBook toolchain.
-endif::backend-docbook[]
+```
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/basic-prerequisites.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/basic-prerequisites.mdx
new file mode 100644
index 000000000000..f27d61b9187a
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/basic-prerequisites.mdx
@@ -0,0 +1,279 @@
+---
+title: "Basic Prerequisites"
+description: "This section lists required services and some required system configuration."
+---
+
+## Java [#configuration-basic-prerequisites-java]
+
+HBase runs on the Java Virtual Machine, thus all HBase deployments require a JVM runtime.
+
+The following table summarizes the recommendations of the HBase community with respect to running on various Java versions. The ✅ symbol indicates a base level of testing and willingness to help diagnose and address issues you might run into; these are the expected deployment combinations. An entry of ⚠️ means that there may be challenges with this combination, and you should look for more information before deciding to pursue this as your deployment strategy. The ❌ means this combination does not work; either an older Java version is considered deprecated by the HBase community, or this combination is known to not work. For combinations of newer JDK with older HBase releases, it's likely there are known compatibility issues that cannot be addressed under our compatibility guarantees, making the combination impossible. In some cases, specific guidance on limitations (e.g. whether compiling / unit tests work, specific operational issues, etc) are also noted. Assume any combination not listed here is considered ❌.
+
+
+ HBase recommends downstream users rely only on JDK releases that are marked as Long-Term Supported
+ (LTS), either from the OpenJDK project or vendors. At the time of this writing, the following JDK
+ releases are NOT LTS releases and are NOT tested or advocated for use by the Apache HBase
+ community: JDK9, JDK10, JDK12, JDK13, and JDK14. Community discussion around this decision is
+ recorded on [HBASE-20264](https://issues.apache.org/jira/browse/HBASE-20264).
+
+
+
+ At this time, all testing performed by the Apache HBase project runs on the HotSpot variant of the
+ JVM. When selecting your JDK distribution, please take this into consideration.
+
+
+**Java support by release line**
+
+| HBase Version | JDK 6 | JDK 7 | JDK 8 | JDK 11 | JDK 17 |
+| :-----------: | :---: | :---: | :---: | :----: | :----: |
+| HBase 2.6 | ❌ | ❌ | ✅ | ✅ | ✅ |
+| HBase 2.5 | ❌ | ❌ | ✅ | ✅ | ⚠️\* |
+| HBase 2.4 | ❌ | ❌ | ✅ | ✅ | ❌ |
+| HBase 2.3 | ❌ | ❌ | ✅ | ⚠️\* | ❌ |
+| HBase 2.0-2.2 | ❌ | ❌ | ✅ | ❌ | ❌ |
+| HBase 1.2+ | ❌ | ✅ | ✅ | ❌ | ❌ |
+| HBase 1.0-1.1 | ❌ | ✅ | ⚠️ | ❌ | ❌ |
+| HBase 0.98 | ✅ | ✅ | ⚠️ | ❌ | ❌ |
+| HBase 0.94 | ✅ | ✅ | ❌ | ❌ | ❌ |
+
+
+Preliminary support for JDK11 was introduced with HBase 2.3.0, and for JDK17 with HBase 2.5.x. We compile and run test suites with JDK11/17 in precommit checks and nightly checks. We will mark the support as ✅ as long as we have run some ITs with the JDK version and there are users in the community running that JDK version in real production clusters.
+
+For JDK11/JDK17 support in HBase, please refer to [HBASE-22972](https://issues.apache.org/jira/browse/HBASE-22972) and [HBASE-26038](https://issues.apache.org/jira/browse/HBASE-26038).
+
+For JDK11/JDK17 support in Hadoop, which may also affect HBase, please refer to [HADOOP-15338](https://issues.apache.org/jira/browse/HADOOP-15338) and [HADOOP-17177](https://issues.apache.org/jira/browse/HADOOP-17177).
+
+
+
+
+ You must set `JAVA_HOME` on each node of your cluster. *hbase-env.sh* provides a handy mechanism
+ to do this.
+
+
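+A minimal sketch of such an entry (the JDK path is only an example; use the location of your own installation):
+
+```bash
+# hbase-env.sh
+export JAVA_HOME=/usr/lib/jvm/java-17-openjdk
+```
+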
+### Operating System Utilities
+
+#### ssh
+
+HBase uses the Secure Shell (ssh) command and utilities extensively to communicate between cluster nodes. Each server in the cluster must be running `ssh` so that the Hadoop and HBase daemons can be managed. You must be able to connect to all nodes via SSH, including the local node, from the Master as well as any backup Master, using a shared key rather than a password. You can see the basic methodology for such a set-up in Linux or Unix systems in the "[Procedure: Configure Passwordless SSH Access](/docs/getting-started#procedure-configure-passwordless-ssh-access)" chapter. If your cluster nodes use OS X, see the section "SSH: Setting up Remote Desktop and Enabling Self-Login" on the Hadoop wiki.
+
+#### DNS
+
+HBase uses the local hostname to self-report its IP address.
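+
+A quick way to sanity-check what that hostname resolves to (standard Linux utilities, nothing HBase-specific):
+
+```bash
+$ hostname
+$ getent hosts "$(hostname)"
+```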
+
+#### NTP
+
+The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism on your cluster and that all nodes look to the same service for time synchronization. See the [Basic NTP Configuration](http://www.tldp.org/LDP/sag/html/basic-ntp-config.html) at _The Linux Documentation Project (TLDP)_ to set up NTP.
+
+#### Limits on Number of Files and Processes (ulimit)
+
+Apache HBase is a database. It requires the ability to open a large number of files at once. Many Linux distributions limit the number of files a single user is allowed to open to `1024` (or `256` on older versions of OS X). You can check this limit on your servers by running the command `ulimit -n` when logged in as the user which runs HBase. See [the Troubleshooting section](/docs/troubleshooting#javaioioexceptiontoo-many-open-files) for some of the problems you may experience if the limit is too low. You may also notice errors such as the following:
+
+```text
+2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Exception increateBlockOutputStream java.io.EOFException
+2010-04-06 03:04:37,542 INFO org.apache.hadoop.hdfs.DFSClient: Abandoning block blk_-6935524980745310745_1391901
+```
+
+It is recommended to raise the ulimit to at least 10,000, but more likely 10,240, because the value is usually expressed in multiples of 1024. Each ColumnFamily has at least one StoreFile, and possibly more than six StoreFiles if the region is under load. The number of open files required depends upon the number of ColumnFamilies and the number of regions. The following is a rough formula for calculating the potential number of open files on a RegionServer.
+
+**Calculate the Potential Number of Open Files:**
+
+```text
+(StoreFiles per ColumnFamily) x (ColumnFamilies per region) x (regions per RegionServer)
+```
+
+For example, assuming that a schema had 3 ColumnFamilies per region with an average of 3 StoreFiles per ColumnFamily, and there are 100 regions per RegionServer, the JVM will open `3 * 3 * 100 = 900` file descriptors, not counting open JAR files, configuration files, and others. Opening a file does not take many resources, and the risk of allowing a user to open too many files is minimal.
+
+Another related setting is the number of processes a user is allowed to run at once. In Linux and Unix, the number of processes is set using the `ulimit -u` command. This should not be confused with the `nproc` command, which reports the number of processing units available. Under load, a `ulimit -u` that is too low can cause OutOfMemoryError exceptions.
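+
+To see the limits currently in effect, a quick check run as the user that starts HBase might look like this:
+
+```bash
+$ ulimit -n   # maximum number of open file descriptors
+$ ulimit -u   # maximum number of user processes
+```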
+
+Configuring the maximum number of file descriptors and processes for the user who is running the HBase process is an operating system configuration, rather than an HBase configuration. It is also important to be sure that the settings are changed for the user that actually runs HBase. To see which user started HBase, and that user's ulimit configuration, look at the first line of the HBase log for that instance.
+
+#### Example: `ulimit` Settings on Ubuntu [!toc]
+
+To configure ulimit settings on Ubuntu, edit _/etc/security/limits.conf_, which is a space-delimited file with four columns. Refer to the man page for _limits.conf_ for details about the format of this file. In the following example, the first line sets both soft and hard limits for the number of open files (nofile) to 32768 for the operating system user with the username hadoop. The second line sets the number of processes to 32000 for the same user.
+
+```text
+hadoop - nofile 32768
+hadoop - nproc 32000
+```
+
+The settings are only applied if the Pluggable Authentication Module (PAM) environment is directed to use them. To configure PAM to use these limits, be sure that the _/etc/pam.d/common-session_ file contains the following line:
+
+```text
+session required pam_limits.so
+```
+
+#### Linux Shell
+
+All of the shell scripts that come with HBase rely on the [GNU Bash](http://www.gnu.org/software/bash) shell.
+
+#### Windows
+
+Running production systems on Windows machines is not recommended.
+
+## Hadoop
+
+The following table summarizes the versions of [Hadoop](https://hadoop.apache.org) supported with each version of HBase. Older versions not appearing in this table are considered unsupported and likely missing necessary features, while newer versions are untested but may be suitable.
+
+Based on the version of HBase, you should select the most appropriate version of Hadoop. You can use Apache Hadoop, or a vendor's distribution of Hadoop. No distinction is made here. See [the Hadoop wiki](https://cwiki.apache.org/confluence/display/HADOOP2/Distributions+and+Commercial+Support) for information about vendors of Hadoop.
+
+
+Compared to Hadoop 1.x, Hadoop 2.x is faster and includes features, such as short-circuit reads (see [Leveraging local data](/docs/performance#leveraging-local-data)), which will help improve your HBase random read profile. Hadoop 2.x also includes important bug fixes that will improve your overall HBase experience. HBase does not support running with earlier versions of Hadoop. See the table below for requirements specific to different HBase versions.
+
+Today, Hadoop 3.x is recommended. The last Hadoop 2.x release, 2.10.2, was made years ago and there has been no new Hadoop 2.x release for a long time, although the Hadoop community has not yet officially declared Hadoop 2.x end-of-life.
+
+
+
+Use the following legend to interpret these tables:
+
+- ✅ = Tested to be fully-functional
+- ❌ = Known to not be fully-functional, or there are [CVEs](https://hadoop.apache.org/cve_list.html), so support has been dropped in newer minor releases
+- ⚠️ = Not tested, may/may-not function
+
+### Hadoop version support matrix for active release lines
+
+|                     | HBase-2.5.x  | HBase-2.6.x |
+| ------------------- | ------------ | ----------- |
+| Hadoop-2.10.\[0-1\] | ❌ | ❌ |
+| Hadoop-2.10.2+ | ✅ | ✅ |
+| Hadoop-3.1.0 | ❌ | ❌ |
+| Hadoop-3.1.1+ | ❌ | ❌ |
+| Hadoop-3.2.\[0-2\] | ❌ | ❌ |
+| Hadoop-3.2.3+ | ✅ | ❌ |
+| Hadoop-3.3.\[0-1\] | ❌ | ❌ |
+| Hadoop-3.3.\[2-4\] | ✅ | ❌ |
+| Hadoop-3.3.5+ | ✅ | ✅ |
+| Hadoop-3.4.0+ | ✅ (2.5.11+) | ✅ (2.6.2+) |
+
+### Hadoop version support matrix for EOM 2.3+ release lines
+
+| | HBase-2.3.x | HBase-2.4.x |
+| ------------- | ----------- | ----------- |
+| Hadoop-2.10.x | ✅ | ✅ |
+| Hadoop-3.1.0 | ❌ | ❌ |
+| Hadoop-3.1.1+ | ✅ | ✅ |
+| Hadoop-3.2.x | ✅ | ✅ |
+| Hadoop-3.3.x | ✅ | ✅ |
+
+### Hadoop version support matrix for EOM 2.x release lines
+
+| | HBase-2.0.x | HBase-2.1.x | HBase-2.2.x |
+| ------------------ | ----------- | ----------- | ----------- |
+| Hadoop-2.6.1+ | ✅ | ❌ | ❌ |
+| Hadoop-2.7.\[0-6\] | ❌ | ❌ | ❌ |
+| Hadoop-2.7.7+ | ✅ | ✅ | ❌ |
+| Hadoop-2.8.\[0-2\] | ❌ | ❌ | ❌ |
+| Hadoop-2.8.\[3-4\] | ✅ | ✅ | ❌ |
+| Hadoop-2.8.5+ | ✅ | ✅ | ✅ |
+| Hadoop-2.9.\[0-1\] | ⚠️ | ❌ | ❌ |
+| Hadoop-2.9.2+ | ⚠️ | ⚠️ | ✅ |
+| Hadoop-3.0.\[0-2\] | ❌ | ❌ | ❌ |
+| Hadoop-3.0.3+ | ❌ | ✅ | ❌ |
+| Hadoop-3.1.0 | ❌ | ❌ | ❌ |
+| Hadoop-3.1.1+ | ❌ | ✅ | ✅ |
+
+### Hadoop version support matrix for EOM 1.5+ release lines
+
+| | HBase-1.5.x | HBase-1.6.x | HBase-1.7.x |
+| ------------------ | ----------- | ----------- | ----------- |
+| Hadoop-2.7.7+ | ✅ | ❌ | ❌ |
+| Hadoop-2.8.\[0-4\] | ❌ | ❌ | ❌ |
+| Hadoop-2.8.5+ | ✅ | ✅ | ✅ |
+| Hadoop-2.9.\[0-1\] | ❌ | ❌ | ❌ |
+| Hadoop-2.9.2+ | ✅ | ✅ | ✅ |
+| Hadoop-2.10.x | ⚠️ | ✅ | ✅ |
+
+### Hadoop version support matrix for EOM 1.x release lines
+
+| | HBase-1.0.x (Hadoop 1.x is NOT supported) | HBase-1.1.x | HBase-1.2.x | HBase-1.3.x | HBase-1.4.x |
+| ------------- | ----------------------------------------- | ----------- | ----------- | ----------- | ----------- |
+| Hadoop-2.4.x | ✅ | ✅ | ✅ | ✅ | ❌ |
+| Hadoop-2.5.x | ✅ | ✅ | ✅ | ✅ | ❌ |
+| Hadoop-2.6.0 | ❌ | ❌ | ❌ | ❌ | ❌ |
+| Hadoop-2.6.1+ | ⚠️ | ⚠️ | ✅ | ✅ | ❌ |
+| Hadoop-2.7.0 | ❌ | ❌ | ❌ | ❌ | ❌ |
+| Hadoop-2.7.1+ | ⚠️ | ⚠️ | ✅ | ✅ | ✅ |
+
+### Hadoop version support matrix for EOM pre-1.0 release lines
+
+| | HBase-0.92.x | HBase-0.94.x | HBase-0.96.x | HBase-0.98.x (Support for Hadoop 1.1+ is deprecated.) |
+| ------------------ | ------------ | ------------ | ------------ | ----------------------------------------------------- |
+| Hadoop-0.20.205 | ✅ | ❌ | ❌ | ❌ |
+| Hadoop-0.22.x | ✅ | ❌ | ❌ | ❌ |
+| Hadoop-1.0.x | ❌ | ❌ | ❌ | ❌ |
+| Hadoop-1.1.x | ⚠️ | ✅ | ✅ | ⚠️ |
+| Hadoop-0.23.x | ❌ | ✅ | ⚠️ | ❌ |
+| Hadoop-2.0.x-alpha | ❌ | ⚠️ | ❌ | ❌ |
+| Hadoop-2.1.0-beta | ❌ | ⚠️ | ✅ | ❌ |
+| Hadoop-2.2.0 | ❌ | ⚠️ | ✅ | ✅ |
+| Hadoop-2.3.x | ❌ | ⚠️ | ✅ | ✅ |
+| Hadoop-2.4.x | ❌ | ⚠️ | ✅ | ✅ |
+| Hadoop-2.5.x | ❌ | ⚠️ | ✅ | ✅ |
+
+
+
+ Starting around the time of Hadoop version 2.7.0, the Hadoop PMC got into the habit of calling out
+ new minor releases on their major version 2 release line as not stable / production ready. As
+ such, HBase expressly advises downstream users to avoid running on top of these releases. Note
+ that additionally the 2.8.1 release was given the same caveat by the Hadoop PMC. For reference,
+ see the release announcements for [Apache Hadoop
+ 2.7.0](https://s.apache.org/hadoop-2.7.0-announcement), [Apache Hadoop
+ 2.8.0](https://s.apache.org/hadoop-2.8.0-announcement), [Apache Hadoop
+ 2.8.1](https://s.apache.org/hadoop-2.8.1-announcement), and [Apache Hadoop
+ 2.9.0](https://s.apache.org/hadoop-2.9.0-announcement).
+
+
+
+ The Hadoop PMC called out the 3.1.0 release as not stable / production ready. As such, HBase
+ expressly advises downstream users to avoid running on top of this release. For reference, see the
+ [release announcement for Hadoop 3.1.0](https://s.apache.org/hadoop-3.1.0-announcement).
+
+
+
+ Because HBase depends on Hadoop, it bundles Hadoop jars under its *lib* directory. The bundled
+ jars are ONLY for use in stand-alone mode. In distributed mode, it is *critical* that the version
+ of Hadoop that is out on your cluster match what is under HBase. Replace the hadoop jars found in
+ the HBase lib directory with the equivalent hadoop jars from the version you are running on your
+ cluster to avoid version mismatch issues. Make sure you replace the jars under HBase across your
+ whole cluster. Hadoop version mismatch issues have various manifestations. Check for mismatch if
+ HBase appears hung.
+
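+A rough sketch of that replacement, assuming `HBASE_HOME` and `HADOOP_HOME` point at the installations on each node and that the cluster's Hadoop uses the standard _share/hadoop_ layout (verify the paths for your distribution before running anything like this):
+
+```bash
+# Remove the Hadoop jars bundled with HBase ...
+rm -f "$HBASE_HOME"/lib/hadoop-*.jar
+# ... and copy in the jars from the Hadoop version actually running on the cluster.
+find "$HADOOP_HOME/share/hadoop" -name 'hadoop-*.jar' ! -name '*-tests.jar' \
+  -exec cp {} "$HBASE_HOME/lib/" \;
+```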
+
+### Hadoop 3 Support for the HBase Binary Releases and Maven Artifacts
+
+For HBase 2.5.1 and earlier, the official HBase binary releases and Maven artifacts were built with Hadoop 2.x.
+
+Starting with HBase 2.5.2, HBase provides binary releases and Maven artifacts built with both Hadoop 2.x and Hadoop 3.x. The Hadoop 2 artifacts do not have any version suffix, while the Hadoop 3 artifacts add the `-hadoop3` suffix to the version. For example, `hbase-2.5.2-bin.tar.gz` is the binary release built with Hadoop 2, and `hbase-2.5.2-hadoop3-bin.tar.gz` is the release built with Hadoop 3.
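+
+For example, a Maven dependency on the Hadoop 3 flavour of the shaded client might look like the following (the version shown is only illustrative):
+
+```xml
+<dependency>
+  <groupId>org.apache.hbase</groupId>
+  <artifactId>hbase-shaded-client</artifactId>
+  <version>2.5.2-hadoop3</version>
+</dependency>
+```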
+
+### Hadoop 3 version policy
+
+Each HBase release has a default Hadoop 3 version. This is used when the Hadoop 3 version is not specified during the build, and for building the official binary releases and artifacts. Generally, when a new minor version is released (e.g. 2.5.0), the default version is set to the latest supported Hadoop 3 version at the start of the release process.
+
+Up to HBase 2.5.10 and 2.6.1, even if HBase added support for newer Hadoop 3 releases in a patch release, the default Hadoop 3 version (and the one used in the official binary releases) was not updated. This simplified upgrading, but meant that HBase releases often included old, unfixed CVEs from both Hadoop and Hadoop's dependencies, even when newer Hadoop releases with fixes were available.
+
+Starting with HBase 2.5.11 and 2.6.2, the default Hadoop 3 version is always set to the latest supported Hadoop 3 version, and is also used for the `-hadoop3` binary releases and artifacts. This will drastically reduce the number of known CVEs shipped in the HBase binary releases, and make sure that all fixes and improvements in Hadoop are included.
+
+### `dfs.datanode.max.transfer.threads`
+
+An HDFS DataNode has an upper bound on the number of files that it will serve at any one time. Before doing any loading, make sure you have configured Hadoop's _conf/hdfs-site.xml_, setting the `dfs.datanode.max.transfer.threads` value to at least the following:
+
+```xml
+<property>
+  <name>dfs.datanode.max.transfer.threads</name>
+  <value>4096</value>
+</property>
+```
+
+Be sure to restart your HDFS after making the above configuration.
+
+Not having this configuration in place makes for strange-looking failures. One manifestation is a complaint about missing blocks. For example:
+
+```text
+10/12/08 20:10:31 INFO hdfs.DFSClient: Could not obtain block
+ blk_XXXXXXXXXXXXXXXXXXXXXX_YYYYYYYY from any node: java.io.IOException: No live nodes
+ contain current block. Will get new block locations from namenode and retry...
+```
+
+See also [Case Studies](/docs/case-studies#case-study-4-maxtransferthreads-config) and note that this property was previously known as `dfs.datanode.max.xcievers` (e.g. [Hadoop HDFS: Deceived by Xciever](http://ccgtech.blogspot.com/2010/02/hadoop-hdfs-deceived-by-xciever.html)).
+
+## ZooKeeper Requirements
+
+An Apache ZooKeeper quorum is required. The exact version depends on your version of HBase, though the minimum ZooKeeper version is 3.4.x, due to the `useMulti` feature, which was made default in HBase 1.0.0 (see [HBASE-16598](https://issues.apache.org/jira/browse/HBASE-16598)).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/confirm.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/confirm.mdx
new file mode 100644
index 000000000000..316f752e2e2f
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/confirm.mdx
@@ -0,0 +1,31 @@
+---
+title: "Running and Confirming Your Installation"
+description: "Steps to start HBase services and verify your installation is working correctly."
+---
+
+Make sure HDFS is running first. Start and stop the Hadoop HDFS daemons by running _sbin/start-dfs.sh_ over in the `HADOOP_HOME` directory. You can ensure it started properly by testing the `put` and `get` of files into the Hadoop filesystem. HBase does not normally use the MapReduce or YARN daemons, so these do not need to be started.
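+
+For example, a quick sanity check that HDFS is up and writable might look like this (a minimal sketch; the file name is arbitrary):
+
+```bash
+# Write a small file into HDFS, read it back, then clean up.
+echo "hbase hdfs check" > /tmp/hdfs-check.txt
+hdfs dfs -put /tmp/hdfs-check.txt /tmp/
+hdfs dfs -cat /tmp/hdfs-check.txt
+hdfs dfs -rm /tmp/hdfs-check.txt
+```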
+
+_If_ you are managing your own ZooKeeper, start it and confirm it's running, else HBase will start up ZooKeeper for you as part of its start process.
+
+Start HBase with the following command:
+
+```bash
+bin/start-hbase.sh
+```
+
+Run the above from the `HBASE_HOME` directory.
+
+You should now have a running HBase instance. HBase logs can be found in the _logs_ subdirectory. Check them out especially if HBase had trouble starting.
+
+HBase also puts up a UI listing vital attributes. By default it's deployed on the Master host at port 16010 (HBase RegionServers listen on port 16020 by default and put up an informational HTTP server at port 16030). If the Master is running on a host named `master.example.org` on the default port, point your browser at http://master.example.org:16010 to see the web interface.
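+
+If you do not have a browser handy, a quick reachability check from a shell might look like this (a sketch; substitute your actual Master host):
+
+```bash
+# Expect an HTTP response from the Master info server on port 16010.
+curl -sI http://master.example.org:16010/ | head -n 1
+```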
+
+Once HBase has started, see the [shell exercises](/docs/getting-started#procedure-use-hbase-for-the-first-time) documentation for how to create tables, add data, scan your insertions, and finally disable and drop your tables.
+
+To stop HBase after exiting the HBase shell, enter
+
+```bash
+$ ./bin/stop-hbase.sh
+stopping hbase...............
+```
+
+Shutdown can take a moment to complete. It can take longer if your cluster comprises many machines. If you are running a distributed operation, be sure to wait until HBase has shut down completely before stopping the Hadoop daemons.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/default.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/default.mdx
new file mode 100644
index 000000000000..f1f590579f29
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/default.mdx
@@ -0,0 +1,131 @@
+---
+title: "Default Configuration"
+description: "HBase default configuration properties and how to customize them via hbase-site.xml for site-specific settings."
+---
+
+## _hbase-site.xml_ and _hbase-default.xml_
+
+Just as in Hadoop where you add site-specific HDFS configuration to the _hdfs-site.xml_ file, for HBase, site specific customizations go into the file _conf/hbase-site.xml_. For the list of configurable properties, see [hbase default configurations](/docs/configuration/default#configuration-default-hbase-default-configuration) below or view the raw _hbase-default.xml_ source file in the HBase source code at _src/main/resources_.
+
+Not all configuration options make it out to _hbase-default.xml_. Some configurations only appear in source code; the only way to identify these is through code review.
+
+Currently, changes here will require a cluster restart for HBase to notice the change.
+
+## HBase Default configuration [#configuration-default-hbase-default-configuration]
+
+The documentation below is generated using the default hbase configuration file, _hbase-default.xml_, as source.
+
+./hbase-default.md
+
+## hbase-env.sh [#configuration-default-hbase-env-sh]
+
+Set HBase environment variables in this file. Examples include options to pass the JVM on start of an HBase daemon, such as heap size and garbage collector configs. You can also set other HBase configurations, such as log directories, niceness, ssh options, and where to locate process pid files. Open the file at _conf/hbase-env.sh_ and peruse its content. Each option is fairly well documented. Add your own environment variables here if you want them read by HBase daemons on startup.
+
+Changes here will require a cluster restart for HBase to notice the change.
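+
+As an illustration, a couple of commonly adjusted entries in _conf/hbase-env.sh_ might look like the following (the values are examples, not recommendations):
+
+```bash
+# Whether HBase should manage its own ZooKeeper (true) or use an external ensemble (false).
+export HBASE_MANAGES_ZK=false
+
+# Extra JVM options passed to all HBase daemons on startup (example GC flag).
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseG1GC"
+```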
+
+## _log4j2.properties_
+
+Since version 2.5.0, HBase has upgraded to Log4j2, so the configuration file name and format has changed. Read more in [Apache Log4j2](https://logging.apache.org/log4j/2.x/index.html).
+
+Edit this file to change the rate at which HBase log files are rolled and to change the level at which HBase logs messages.
+
+Changes here will require a cluster restart for HBase to notice the change, though log levels can be changed for particular daemons via the HBase UI.
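+
+For instance, to raise the log level of one package, a possible _log4j2.properties_ entry is the following sketch (the logger id and package are examples):
+
+```properties
+# Log the HBase client package at DEBUG while leaving the root level unchanged.
+logger.hbaseclient.name = org.apache.hadoop.hbase.client
+logger.hbaseclient.level = DEBUG
+```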
+
+## Client configuration and dependencies connecting to an HBase cluster
+
+If you are running HBase in standalone mode, you don't need to configure anything for your client to work, provided that the client and HBase are on the same machine.
+
+Starting with release 3.0.0, the default connection registry has been switched to an RPC-based implementation. Refer to [Rpc Connection Registry (new as of 2.5.0)](/docs/architecture/client#rpc-connection-registry-new-as-of-250). Depending on your HBase version, the following is the expected minimal client configuration.
+
+### Up until 2.x.y releases
+
+In 2.x.y releases, the default connection registry was based on ZooKeeper as the source of truth. This means that the clients always looked up ZooKeeper znodes to fetch the required metadata. For example, if the active master crashed and a new master was elected, clients looked up the master znode to fetch the active master address (and similarly for meta locations). This meant that the clients needed access to ZooKeeper and needed to know the ZooKeeper ensemble information before they could do anything. This can be configured in the client configuration xml as follows:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>example1,example2,example3</value>
+    <description>Zookeeper ensemble information</description>
+  </property>
+</configuration>
+```
+
+### Starting from 3.0.0 release
+
+The default implementation was switched to an RPC-based connection registry. With this implementation, clients by default contact the active or stand-by master RPC end points to fetch the connection registry information. This means that the clients should have access to the list of active and stand-by master end points before they can do anything. This can be configured in the client configuration xml as follows:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>hbase.masters</name>
+    <value>example1,example2,example3</value>
+    <description>List of master rpc end points for the hbase cluster.</description>
+  </property>
+</configuration>
+```
+
+The configuration value for _hbase.masters_ is a comma separated list of _host:port_ values. If no port value is specified, the default of _16000_ is assumed.
+
+Of course you are free to specify bootstrap nodes other than masters, like:
+
+```xml
+<configuration>
+  <property>
+    <name>hbase.client.bootstrap.servers</name>
+    <value>server1:16020,server2:16020,server3:16020</value>
+  </property>
+</configuration>
+```
+
+The configuration value for _hbase.client.bootstrap.servers_ is a comma separated list of _host:port_ values. Notice that port must be specified here.
+
+Usually these configurations are kept in _hbase-site.xml_ and are picked up by the client from the `CLASSPATH`.
+
+If you are configuring an IDE to run an HBase client, you should include the _conf/_ directory on your classpath so _hbase-site.xml_ settings can be found (or add _src/test/resources_ to pick up the hbase-site.xml used by tests).
+
+For Java applications using Maven, the hbase-shaded-client module is the recommended dependency when connecting to a cluster:
+
+```xml
+<dependency>
+  <groupId>org.apache.hbase</groupId>
+  <artifactId>hbase-shaded-client</artifactId>
+  <version>2.0.0</version>
+</dependency>
+```
+
+### Java client configuration
+
+The configuration used by a Java client is kept in an [HBaseConfiguration](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/HBaseConfiguration) instance.
+
+The factory method on HBaseConfiguration, `HBaseConfiguration.create();`, on invocation, will read in the content of the first _hbase-site.xml_ found on the client's `CLASSPATH`, if one is present (Invocation will also factor in any _hbase-default.xml_ found; an _hbase-default.xml_ ships inside the _hbase.X.X.X.jar_). It is also possible to specify configuration directly without having to read from a _hbase-site.xml_.
+
+For example, to set the ZooKeeper ensemble or bootstrap nodes for the cluster programmatically do as follows:
+
+```java
+Configuration config = HBaseConfiguration.create();
+config.set("hbase.zookeeper.quorum", "localhost"); // Until 2.x.y versions
+// ---- or ----
+config.set("hbase.client.bootstrap.servers", "localhost:1234"); // Starting 3.0.0 version
+```
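+
+As a follow-up sketch, the resulting configuration can be handed to `ConnectionFactory` to open a connection (the table name `test` is hypothetical):
+
+```java
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
+
+public class ConnectExample {
+  public static void main(String[] args) throws Exception {
+    Configuration config = HBaseConfiguration.create();
+    config.set("hbase.zookeeper.quorum", "localhost"); // or hbase.client.bootstrap.servers on 3.0.0+
+    try (Connection connection = ConnectionFactory.createConnection(config);
+         Table table = connection.getTable(TableName.valueOf("test"))) {
+      // The Connection and Table are closed automatically by try-with-resources.
+      System.out.println("Connected; table: " + table.getName());
+    }
+  }
+}
+```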
+
+## Timeout settings
+
+HBase provides a wide variety of timeout settings to limit the execution time of various remote operations.
+
+- hbase.rpc.timeout
+- hbase.rpc.read.timeout
+- hbase.rpc.write.timeout
+- hbase.client.operation.timeout
+- hbase.client.meta.operation.timeout
+- hbase.client.scanner.timeout.period
+
+The `hbase.rpc.timeout` property limits how long a single RPC call can run before timing out. To fine-tune read- or write-related RPC timeouts, set the `hbase.rpc.read.timeout` and `hbase.rpc.write.timeout` configuration properties. In the absence of these properties, `hbase.rpc.timeout` is used.
+
+A higher-level timeout is `hbase.client.operation.timeout`, which is valid for each client call. When an RPC call fails, for instance because of a timeout due to `hbase.rpc.timeout`, it is retried until `hbase.client.operation.timeout` is reached. The client operation timeout for system tables can be fine-tuned by setting the `hbase.client.meta.operation.timeout` configuration value. When this is not set, it falls back to `hbase.client.operation.timeout`.
+
+The timeout for scan operations is controlled differently. Use the `hbase.client.scanner.timeout.period` property to set this timeout.
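+
+For illustration, such timeouts could be set in _hbase-site.xml_ as follows (the values are arbitrary examples, not recommendations):
+
+```xml
+<configuration>
+  <property>
+    <name>hbase.rpc.timeout</name>
+    <value>60000</value> <!-- limit for a single RPC call, in milliseconds -->
+  </property>
+  <property>
+    <name>hbase.client.operation.timeout</name>
+    <value>1200000</value> <!-- overall limit per client call, including retries -->
+  </property>
+</configuration>
+```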
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/dynamic.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/dynamic.mdx
new file mode 100644
index 000000000000..0a4a575e0897
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/dynamic.mdx
@@ -0,0 +1,91 @@
+---
+title: "Dynamic Configuration"
+description: "How to change HBase configuration properties dynamically without server restart using update_config commands."
+---
+
+It is possible to change a subset of the configuration without requiring a server restart. In the HBase shell, the operations `update_config`, `update_all_config` and `update_rsgroup_config` will prompt a server, all servers or all servers in the RSGroup to reload configuration.
+
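+For example, from the HBase shell (a sketch; the server name argument uses the usual `HOSTNAME,PORT,STARTCODE` form and the values shown are hypothetical):
+
+```text
+hbase> update_config 'server1.example.org,16020,1609350314238'
+hbase> update_all_config
+hbase> update_rsgroup_config 'default'
+```
+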
+Only a subset of all configurations can currently be changed in the running server. Here are those configurations:
+
+Configurations that support dynamic change:
+
+| Key |
+| ---------------------------------------------------------------------- |
+| hbase.ipc.server.fallback-to-simple-auth-allowed |
+| hbase.cleaner.scan.dir.concurrent.size |
+| hbase.coprocessor.master.classes |
+| hbase.coprocessor.region.classes |
+| hbase.coprocessor.regionserver.classes |
+| hbase.coprocessor.user.region.classes |
+| hbase.regionserver.thread.compaction.large |
+| hbase.regionserver.thread.compaction.small |
+| hbase.regionserver.thread.split |
+| hbase.regionserver.throughput.controller |
+| hbase.regionserver.thread.hfilecleaner.throttle |
+| hbase.regionserver.hfilecleaner.large.queue.size |
+| hbase.regionserver.hfilecleaner.small.queue.size |
+| hbase.regionserver.hfilecleaner.large.thread.count |
+| hbase.regionserver.hfilecleaner.small.thread.count |
+| hbase.regionserver.hfilecleaner.thread.timeout.msec |
+| hbase.regionserver.hfilecleaner.thread.check.interval.msec |
+| hbase.regionserver.flush.throughput.controller |
+| hbase.hstore.compaction.max.size |
+| hbase.hstore.compaction.max.size.offpeak |
+| hbase.hstore.compaction.min.size |
+| hbase.hstore.compaction.min |
+| hbase.hstore.compaction.max |
+| hbase.hstore.compaction.ratio |
+| hbase.hstore.compaction.ratio.offpeak |
+| hbase.regionserver.thread.compaction.throttle |
+| hbase.hregion.majorcompaction |
+| hbase.hregion.majorcompaction.jitter |
+| hbase.hstore.min.locality.to.skip.major.compact |
+| hbase.hstore.compaction.date.tiered.max.storefile.age.millis |
+| hbase.hstore.compaction.date.tiered.incoming.window.min |
+| hbase.hstore.compaction.date.tiered.window.policy.class |
+| hbase.hstore.compaction.date.tiered.single.output.for.minor.compaction |
+| hbase.hstore.compaction.date.tiered.window.factory.class |
+| hbase.offpeak.start.hour |
+| hbase.offpeak.end.hour |
+| hbase.oldwals.cleaner.thread.size |
+| hbase.oldwals.cleaner.thread.timeout.msec |
+| hbase.oldwals.cleaner.thread.check.interval.msec |
+| hbase.procedure.worker.keep.alive.time.msec |
+| hbase.procedure.worker.add.stuck.percentage |
+| hbase.procedure.worker.monitor.interval.msec |
+| hbase.procedure.worker.stuck.threshold.msec |
+| hbase.regions.slop |
+| hbase.regions.overallSlop |
+| hbase.balancer.tablesOnMaster |
+| hbase.balancer.tablesOnMaster.systemTablesOnly |
+| hbase.util.ip.to.rack.determiner |
+| hbase.ipc.server.max.callqueue.length |
+| hbase.ipc.server.priority.max.callqueue.length |
+| hbase.ipc.server.callqueue.type |
+| hbase.ipc.server.callqueue.codel.target.delay |
+| hbase.ipc.server.callqueue.codel.interval |
+| hbase.ipc.server.callqueue.codel.lifo.threshold |
+| hbase.master.balancer.stochastic.maxSteps |
+| hbase.master.balancer.stochastic.stepsPerRegion |
+| hbase.master.balancer.stochastic.maxRunningTime |
+| hbase.master.balancer.stochastic.runMaxSteps |
+| hbase.master.balancer.stochastic.numRegionLoadsToRemember |
+| hbase.master.loadbalance.bytable |
+| hbase.master.balancer.stochastic.minCostNeedBalance |
+| hbase.master.balancer.stochastic.localityCost |
+| hbase.master.balancer.stochastic.rackLocalityCost |
+| hbase.master.balancer.stochastic.readRequestCost |
+| hbase.master.balancer.stochastic.writeRequestCost |
+| hbase.master.balancer.stochastic.memstoreSizeCost |
+| hbase.master.balancer.stochastic.storefileSizeCost |
+| hbase.master.balancer.stochastic.regionReplicaHostCostKey |
+| hbase.master.balancer.stochastic.regionReplicaRackCostKey |
+| hbase.master.balancer.stochastic.regionCountCost |
+| hbase.master.balancer.stochastic.primaryRegionCountCost |
+| hbase.master.balancer.stochastic.moveCost |
+| hbase.master.balancer.stochastic.moveCost.offpeak |
+| hbase.master.balancer.stochastic.maxMovePercent |
+| hbase.master.balancer.stochastic.tableSkewCost |
+| hbase.master.regions.recovery.check.interval |
+| hbase.regions.recovery.store.file.ref.count |
+| hbase.rsgroup.fallback.enable |
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/example.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/example.mdx
new file mode 100644
index 000000000000..60102683b776
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/example.mdx
@@ -0,0 +1,74 @@
+---
+title: "Example Configurations"
+description: "Sample configuration files and setup examples for distributed HBase clusters."
+---
+
+## Basic Distributed HBase Install
+
+Here is a basic configuration example for a distributed ten node cluster:
+
+- The nodes are named `example0`, `example1`, etc., through node `example9` in this example.
+- The HBase Master and the HDFS NameNode are running on the node `example0`.
+- RegionServers run on nodes `example1`-`example9`.
+- A 3-node ZooKeeper ensemble runs on `example1`, `example2`, and `example3` on the default ports.
+- ZooKeeper data is persisted to the directory _/export/zookeeper_.
+
+Below we show what the main configuration files — _hbase-site.xml_, _regionservers_, and _hbase-env.sh_ — found in the HBase _conf_ directory might look like.
+
+### hbase-site.xml
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>example1,example2,example3</value>
+    <description>The directory shared by RegionServers.</description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>/export/zookeeper</value>
+    <description>Property from ZooKeeper config zoo.cfg.
+    The directory where the snapshot is stored.</description>
+  </property>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://example0:9000/hbase</value>
+    <description>The directory shared by RegionServers.</description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false: standalone and pseudo-distributed setups with managed ZooKeeper
+      true: fully-distributed with unmanaged ZooKeeper Quorum (see hbase-env.sh)</description>
+  </property>
+</configuration>
+```
+
+### _regionservers_
+
+In this file you list the nodes that will run RegionServers. In our case, these nodes are `example1`-`example9`.
+
+```text
+example1
+example2
+example3
+example4
+example5
+example6
+example7
+example8
+example9
+```
+
+### hbase-env.sh [#configuration-example-basic-distributed-hbase-install-hbase-env-sh]
+
+The following lines in the _hbase-env.sh_ file show how to set the `JAVA_HOME` environment variable (required for HBase) and set the heap to 4 GB (rather than the default value of 1 GB). If you copy and paste this example, be sure to adjust the `JAVA_HOME` to suit your environment.
+
+```bash
+# The java implementation to use.
+export JAVA_HOME=/usr/java/jdk1.8.0/
+
+# The maximum amount of heap to use. Default is left to JVM default.
+export HBASE_HEAPSIZE=4G
+```
+
+Use rsync to copy the content of the _conf_ directory to all nodes of the cluster.
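+
+One possible way to do that, assuming passwordless SSH from `example0` and that HBase is installed at the same (hypothetical) path _/opt/hbase_ on every node:
+
+```bash
+# Push the conf directory from example0 to each of the other nodes.
+for host in example1 example2 example3 example4 example5 example6 example7 example8 example9; do
+  rsync -az /opt/hbase/conf/ "${host}:/opt/hbase/conf/"
+done
+```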
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/hbase-default.md b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/hbase-default.md
new file mode 100644
index 000000000000..0455d04698b4
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/hbase-default.md
@@ -0,0 +1,1174 @@
+---
+title: "HBase Default Configuration"
+description: "Complete reference of all HBase configuration properties with descriptions and default values."
+---
+
+#### `hbase.tmp.dir` [!toc]
+
+**Description:** Temporary directory on the local filesystem. Change this setting to point to a location more permanent than '/tmp', the usual resolve for java.io.tmpdir, as the '/tmp' directory is cleared on machine restart.
+**Default:** `${java.io.tmpdir}/hbase-${user.name}`
+
+#### `hbase.rootdir` [!toc]
+
+**Description:** The directory shared by region servers and into which HBase persists. The URL should be 'fully-qualified' to include the filesystem scheme. For example, to specify the HDFS directory '/hbase' where the HDFS instance's namenode is running at namenode.example.org on port 9000, set this value to: hdfs://namenode.example.org:9000/hbase. By default, we write to whatever ${hbase.tmp.dir} is set to -- usually /tmp -- so change this configuration or else all data will be lost on machine restart.
+**Default:** `${hbase.tmp.dir}/hbase`
+
+#### `hbase.cluster.distributed` [!toc]
+
+**Description:** The mode the cluster will be in. Possible values are false for standalone mode and true for distributed mode. If false, startup will run all HBase and ZooKeeper daemons together in the one JVM.
+**Default:** `false`
+
+#### `hbase.zookeeper.quorum` [!toc]
+
+**Description:** Comma separated list of servers in the ZooKeeper ensemble (This config. should have been named hbase.zookeeper.ensemble). For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com". By default this is set to localhost for local and pseudo-distributed modes of operation. For a fully-distributed setup, this should be set to a full list of ZooKeeper ensemble servers. If HBASE_MANAGES_ZK is set in hbase-env.sh this is the list of servers which hbase will start/stop ZooKeeper on as part of cluster start/stop. Client-side, we will take this list of ensemble members and put it together with the hbase.zookeeper.property.clientPort config. and pass it into zookeeper constructor as the connectString parameter.
+**Default:** `127.0.0.1`
+
+#### `zookeeper.recovery.retry.maxsleeptime` [!toc]
+
+**Description:** Max sleep time before retry zookeeper operations in milliseconds, a max time is needed here so that sleep time won't grow unboundedly
+**Default:** `60000`
+
+#### `hbase.local.dir` [!toc]
+
+**Description:** Directory on the local filesystem to be used as a local storage.
+**Default:** `${hbase.tmp.dir}/local/`
+
+#### `hbase.master.port` [!toc]
+
+**Description:** The port the HBase Master should bind to.
+**Default:** `16000`
+
+#### `hbase.master.info.port` [!toc]
+
+**Description:** The port for the HBase Master web UI. Set to -1 if you do not want a UI instance run.
+**Default:** `16010`
+
+#### `hbase.master.info.bindAddress` [!toc]
+
+**Description:** The bind address for the HBase Master web UI
+**Default:** `0.0.0.0`
+
+#### `hbase.master.logcleaner.plugins` [!toc]
+
+**Description:** A comma-separated list of BaseLogCleanerDelegate invoked by the LogsCleaner service. These WAL cleaners are called in order, so put the cleaner that prunes the most files in front. To implement your own BaseLogCleanerDelegate, just put it in HBase's classpath and add the fully qualified class name here. Always add the above default log cleaners in the list.
+**Default:** `org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner,org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner`
+
+#### `hbase.master.logcleaner.ttl` [!toc]
+
+**Description:** How long a WAL remain in the archive ({hbase.rootdir}/oldWALs) directory, after which it will be cleaned by a Master thread. The value is in milliseconds.
+**Default:** `600000`
+
+#### `hbase.master.hfilecleaner.plugins` [!toc]
+
+**Description:** A comma-separated list of BaseHFileCleanerDelegate invoked by the HFileCleaner service. These HFiles cleaners are called in order, so put the cleaner that prunes the most files in front. To implement your own BaseHFileCleanerDelegate, just put it in HBase's classpath and add the fully qualified class name here. Always add the above default hfile cleaners in the list as they will be overwritten in hbase-site.xml.
+**Default:** `org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner`
+
+#### `hbase.master.infoserver.redirect` [!toc]
+
+**Description:** Whether or not the Master listens to the Master web UI port (hbase.master.info.port) and redirects requests to the web UI server shared by the Master and RegionServer. Config. makes sense when Master is serving Regions (not the default).
+**Default:** `true`
+
+#### `hbase.master.fileSplitTimeout` [!toc]
+
+**Description:** Splitting a region, how long to wait on the file-splitting step before aborting the attempt. Default: 600000. This setting used to be known as hbase.regionserver.fileSplitTimeout in hbase-1.x. Split is now run master-side hence the rename (If a 'hbase.master.fileSplitTimeout' setting found, will use it to prime the current 'hbase.master.fileSplitTimeout' Configuration.
+**Default:** `600000`
+
+#### `hbase.regionserver.port` [!toc]
+
+**Description:** The port the HBase RegionServer binds to.
+**Default:** `16020`
+
+#### `hbase.regionserver.info.port` [!toc]
+
+**Description:** The port for the HBase RegionServer web UI Set to -1 if you do not want the RegionServer UI to run.
+**Default:** `16030`
+
+#### `hbase.regionserver.info.bindAddress` [!toc]
+
+**Description:** The address for the HBase RegionServer web UI
+**Default:** `0.0.0.0`
+
+#### `hbase.regionserver.info.port.auto` [!toc]
+
+**Description:** Whether or not the Master or RegionServer UI should search for a port to bind to. Enables automatic port search if hbase.regionserver.info.port is already in use. Useful for testing, turned off by default.
+**Default:** `false`
+
+#### `hbase.regionserver.handler.count` [!toc]
+
+**Description:** Count of RPC Listener instances spun up on RegionServers. Same property is used by the Master for count of master handlers. Too many handlers can be counter-productive. Make it a multiple of CPU count. If mostly read-only, handlers count close to cpu count does well. Start with twice the CPU count and tune from there.
+**Default:** `30`
+
+#### `hbase.ipc.server.callqueue.handler.factor` [!toc]
+
+**Description:** Factor to determine the number of call queues. A value of 0 means a single queue shared between all the handlers. A value of 1 means that each handler has its own queue.
+**Default:** `0.1`
+
+#### `hbase.ipc.server.callqueue.read.ratio` [!toc]
+
+**Description:** Split the call queues into read and write queues. The specified interval (which should be between 0.0 and 1.0) will be multiplied by the number of call queues. A value of 0 indicate to not split the call queues, meaning that both read and write requests will be pushed to the same set of queues. A value lower than 0.5 means that there will be less read queues than write queues. A value of 0.5 means there will be the same number of read and write queues. A value greater than 0.5 means that there will be more read queues than write queues. A value of 1.0 means that all the queues except one are used to dispatch read requests. Example: Given the total number of call queues being 10 a read.ratio of 0 means that: the 10 queues will contain both read/write requests. a read.ratio of 0.3 means that: 3 queues will contain only read requests and 7 queues will contain only write requests. a read.ratio of 0.5 means that: 5 queues will contain only read requests and 5 queues will contain only write requests. a read.ratio of 0.8 means that: 8 queues will contain only read requests and 2 queues will contain only write requests. a read.ratio of 1 means that: 9 queues will contain only read requests and 1 queues will contain only write requests.
+**Default:** `0`
+
+#### `hbase.ipc.server.callqueue.scan.ratio` [!toc]
+
+**Description:** Given the number of read call queues, calculated from the total number of call queues multiplied by the callqueue.read.ratio, the scan.ratio property will split the read call queues into small-read and long-read queues. A value lower than 0.5 means that there will be less long-read queues than short-read queues. A value of 0.5 means that there will be the same number of short-read and long-read queues. A value greater than 0.5 means that there will be more long-read queues than short-read queues A value of 0 or 1 indicate to use the same set of queues for gets and scans. Example: Given the total number of read call queues being 8 a scan.ratio of 0 or 1 means that: 8 queues will contain both long and short read requests. a scan.ratio of 0.3 means that: 2 queues will contain only long-read requests and 6 queues will contain only short-read requests. a scan.ratio of 0.5 means that: 4 queues will contain only long-read requests and 4 queues will contain only short-read requests. a scan.ratio of 0.8 means that: 6 queues will contain only long-read requests and 2 queues will contain only short-read requests.
+**Default:** `0`
+
+#### `hbase.regionserver.msginterval` [!toc]
+
+**Description:** Interval between messages from the RegionServer to Master in milliseconds.
+**Default:** `3000`
+
+#### `hbase.regionserver.logroll.period` [!toc]
+
+**Description:** Period at which we will roll the commit log regardless of how many edits it has.
+**Default:** `3600000`
+
+#### `hbase.regionserver.logroll.errors.tolerated` [!toc]
+
+**Description:** The number of consecutive WAL close errors we will allow before triggering a server abort. A setting of 0 will cause the region server to abort if closing the current WAL writer fails during log rolling. Even a small value (2 or 3) will allow a region server to ride over transient HDFS errors.
+**Default:** `2`
+
+#### `hbase.regionserver.free.heap.min.memory.size` [!toc]
+
+**Description:** Defines the minimum amount of heap memory that must remain free for the RegionServer to start, specified in bytes or human-readable formats like '512m' for megabytes or '4g' for gigabytes. If not set, the default is 20% of the total heap size. To disable the check entirely, set this value to 0. If the combined memory usage of memstore and block cache exceeds (total heap - this value), the RegionServer will fail to start.
+**Default:** `(empty)`
+
+#### `hbase.regionserver.global.memstore.size` [!toc]
+
+**Description:** Maximum size of all memstores in a region server before new updates are blocked and flushes are forced. Defaults to 40% of heap (0.4). Updates are blocked and flushes are forced until size of all memstores in a region server hits hbase.regionserver.global.memstore.size.lower.limit. The default value in this configuration has been intentionally left empty in order to honor the old hbase.regionserver.global.memstore.upperLimit property if present.
+**Default:** `(empty)`
+
+#### `hbase.regionserver.global.memstore.size.lower.limit` [!toc]
+
+**Description:** Maximum size of all memstores in a region server before flushes are forced. Defaults to 95% of hbase.regionserver.global.memstore.size (0.95). A 100% value for this value causes the minimum possible flushing to occur when updates are blocked due to memstore limiting. The default value in this configuration has been intentionally left empty in order to honor the old hbase.regionserver.global.memstore.lowerLimit property if present.
+**Default:** `(empty)`
+
+#### `hbase.systemtables.compacting.memstore.type` [!toc]
+
+**Description:** Determines the type of memstore to be used for system tables like META, namespace tables etc. By default NONE is the type and hence we use the default memstore for all the system tables. If we need to use compacting memstore for system tables then set this property to BASIC/EAGER
+**Default:** `NONE`
+
+#### `hbase.regionserver.optionalcacheflushinterval` [!toc]
+
+**Description:** Maximum amount of time an edit lives in memory before being automatically flushed. Default 1 hour. Set it to 0 to disable automatic flushing.
+**Default:** `3600000`
+
+#### `hbase.regionserver.dns.interface` [!toc]
+
+**Description:** The name of the Network Interface from which a region server should report its IP address.
+**Default:** `default`
+
+#### `hbase.regionserver.dns.nameserver` [!toc]
+
+**Description:** The host name or IP address of the name server (DNS) which a region server should use to determine the host name used by the master for communication and display purposes.
+**Default:** `default`
+
+#### `hbase.regionserver.region.split.policy` [!toc]
+
+**Description:** A split policy determines when a region should be split. The various other split policies that are available currently are BusyRegionSplitPolicy, ConstantSizeRegionSplitPolicy, DisabledRegionSplitPolicy, DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy, and SteppingSplitPolicy. DisabledRegionSplitPolicy blocks manual region splitting.
+**Default:** `org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy`
+
+#### `hbase.regionserver.regionSplitLimit` [!toc]
+
+**Description:** Limit for the number of regions after which no more region splitting should take place. This is not hard limit for the number of regions but acts as a guideline for the regionserver to stop splitting after a certain limit. Default is set to 1000.
+**Default:** `1000`
+
+#### `zookeeper.session.timeout` [!toc]
+
+**Description:** ZooKeeper session timeout in milliseconds. It is used in two different ways. First, this value is used in the ZK client that HBase uses to connect to the ensemble. It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'. See https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#ch_zkSessions. For example, if an HBase region server connects to a ZK ensemble that's also managed by HBase, then the session timeout will be the one specified by this configuration. But, a region server that connects to an ensemble managed with a different configuration will be subjected that ensemble's maxSessionTimeout. So, even though HBase might propose using 90 seconds, the ensemble can have a max timeout lower than this and it will take precedence. The current default maxSessionTimeout that ZK ships with is 40 seconds, which is lower than HBase's.
+**Default:** `90000`
+
+#### `zookeeper.znode.parent` [!toc]
+
+**Description:** Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper files that are configured with a relative path will go under this node. By default, all of HBase's ZooKeeper file paths are configured with a relative path, so they will all go under this directory unless changed.
+**Default:** `/hbase`
+
+#### `zookeeper.znode.acl.parent` [!toc]
+
+**Description:** Root ZNode for access control lists.
+**Default:** `acl`
+
+#### `hbase.zookeeper.dns.interface` [!toc]
+
+**Description:** The name of the Network Interface from which a ZooKeeper server should report its IP address.
+**Default:** `default`
+
+#### `hbase.zookeeper.dns.nameserver` [!toc]
+
+**Description:** The host name or IP address of the name server (DNS) which a ZooKeeper server should use to determine the host name used by the master for communication and display purposes.
+**Default:** `default`
+
+#### `hbase.zookeeper.peerport` [!toc]
+
+**Description:** Port used by ZooKeeper peers to talk to each other. See https://zookeeper.apache.org/doc/r3.4.10/zookeeperStarted.html#sc_RunningReplicatedZooKeeper for more information.
+**Default:** `2888`
+
+#### `hbase.zookeeper.leaderport` [!toc]
+
+**Description:** Port used by ZooKeeper for leader election. See https://zookeeper.apache.org/doc/r3.4.10/zookeeperStarted.html#sc_RunningReplicatedZooKeeper for more information.
+**Default:** `3888`
+
+#### `hbase.zookeeper.property.initLimit` [!toc]
+
+**Description:** Property from ZooKeeper's config zoo.cfg. The number of ticks that the initial synchronization phase can take.
+**Default:** `10`
+
+#### `hbase.zookeeper.property.syncLimit` [!toc]
+
+**Description:** Property from ZooKeeper's config zoo.cfg. The number of ticks that can pass between sending a request and getting an acknowledgment.
+**Default:** `5`
+
+#### `hbase.zookeeper.property.dataDir` [!toc]
+
+**Description:** Property from ZooKeeper's config zoo.cfg. The directory where the snapshot is stored.
+**Default:** `${hbase.tmp.dir}/zookeeper`
+
+#### `hbase.zookeeper.property.clientPort` [!toc]
+
+**Description:** Property from ZooKeeper's config zoo.cfg. The port at which the clients will connect.
+**Default:** `2181`
+
+#### `hbase.zookeeper.property.maxClientCnxns` [!toc]
+
+**Description:** Property from ZooKeeper's config zoo.cfg. Limit on number of concurrent connections (at the socket level) that a single client, identified by IP address, may make to a single member of the ZooKeeper ensemble. Set high to avoid zk connection issues running standalone and pseudo-distributed.
+**Default:** `300`
+
+#### `hbase.client.write.buffer` [!toc]
+
+**Description:** Default size of the BufferedMutator write buffer in bytes. A bigger buffer takes more memory -- on both the client and server side since server instantiates the passed write buffer to process it -- but a larger buffer size reduces the number of RPCs made. For an estimate of server-side memory-used, evaluate hbase.client.write.buffer * hbase.regionserver.handler.count
+**Default:** `2097152`
+
+#### `hbase.client.pause` [!toc]
+
+**Description:** General client pause value. Used mostly as value to wait before running a retry of a failed get, region lookup, etc. See hbase.client.retries.number for description of how we backoff from this initial pause amount and how this pause works w/ retries.
+**Default:** `100`
+
+#### `hbase.client.pause.server.overloaded` [!toc]
+
+**Description:** Pause time when encountering an exception indicating a server is overloaded, CallQueueTooBigException or CallDroppedException. Set this property to a higher value than hbase.client.pause if you observe frequent CallQueueTooBigException or CallDroppedException from the same RegionServer and the call queue there keeps filling up. This config used to be called hbase.client.pause.cqtbe, which has been deprecated as of 2.5.0.
+**Default:** `(empty)`
+
+#### `hbase.client.retries.number` [!toc]
+
+**Description:** Maximum retries. Used as maximum for all retryable operations such as the getting of a cell's value, starting a row update, etc. Retry interval is a rough function based on hbase.client.pause. At first we retry at this interval but then with backoff, we pretty quickly reach retrying every ten seconds. See HConstants#RETRY_BACKOFF for how the backup ramps up. Change this setting and hbase.client.pause to suit your workload.
+**Default:** `15`
+
+#### `hbase.client.max.total.tasks` [!toc]
+
+**Description:** The maximum number of concurrent mutation tasks a single HTable instance will send to the cluster.
+**Default:** `100`
+
+#### `hbase.client.max.perserver.tasks` [!toc]
+
+**Description:** The maximum number of concurrent mutation tasks a single HTable instance will send to a single region server.
+**Default:** `2`
+
+#### `hbase.client.max.perregion.tasks` [!toc]
+
+**Description:** The maximum number of concurrent mutation tasks the client will maintain to a single Region. That is, if there is already hbase.client.max.perregion.tasks writes in progress for this region, new puts won't be sent to this region until some writes finishes.
+**Default:** `1`
+
+#### `hbase.client.perserver.requests.threshold` [!toc]
+
+**Description:** The max number of concurrent pending requests for one server in all client threads (process level). Exceeding requests will be thrown ServerTooBusyException immediately to prevent user's threads being occupied and blocked by only one slow region server. If you use a fix number of threads to access HBase in a synchronous way, set this to a suitable value which is related to the number of threads will help you. See https://issues.apache.org/jira/browse/HBASE-16388 for details.
+**Default:** `2147483647`
+
+#### `hbase.client.scanner.caching` [!toc]
+
+**Description:** Number of rows that we try to fetch when calling next on a scanner if it is not served from (local, client) memory. This configuration works together with hbase.client.scanner.max.result.size to try and use the network efficiently. The default value is Integer.MAX_VALUE by default so that the network will fill the chunk size defined by hbase.client.scanner.max.result.size rather than be limited by a particular number of rows since the size of rows varies table to table. If you know ahead of time that you will not require more than a certain number of rows from a scan, this configuration should be set to that row limit via Scan#setCaching. Higher caching values will enable faster scanners but will eat up more memory and some calls of next may take longer and longer times when the cache is empty. Do not set this value such that the time between invocations is greater than the scanner timeout; i.e. hbase.client.scanner.timeout.period
+**Default:** `2147483647`
+
+#### `hbase.client.keyvalue.maxsize` [!toc]
+
+**Description:** Specifies the combined maximum allowed size of a KeyValue instance. This is to set an upper boundary for a single entry saved in a storage file. Since they cannot be split it helps avoiding that a region cannot be split any further because the data is too large. It seems wise to set this to a fraction of the maximum region size. Setting it to zero or less disables the check.
+**Default:** `10485760`
+
+#### `hbase.server.keyvalue.maxsize` [!toc]
+
+**Description:** Maximum allowed size of an individual cell, inclusive of value and all key components. A value of 0 or less disables the check. The default value is 10MB. This is a safety setting to protect the server from OOM situations.
+**Default:** `10485760`
+
+#### `hbase.client.scanner.timeout.period` [!toc]
+
+**Description:** Client scanner lease period in milliseconds.
+**Default:** `60000`
+
+#### `hbase.client.localityCheck.threadPoolSize` [!toc]
+
+**Description:**
+**Default:** `2`
+
+#### `hbase.bulkload.retries.number` [!toc]
+
+**Description:** Maximum retries. This is the maximum number of iterations that atomic bulk loads are attempted in the face of splitting operations; 0 means never give up.
+**Default:** `10`
+
+#### `hbase.compaction.after.bulkload.enable` [!toc]
+
+**Description:** Request Compaction after bulkload immediately. If bulkload is continuous, the triggered compactions may increase load, bring about performance side effect.
+**Default:** `false`
+
+#### `hbase.master.balancer.maxRitPercent` [!toc]
+
+**Description:** The max percent of regions in transition when balancing. The default value is 1.0. So there are no balancer throttling. If set this config to 0.01, It means that there are at most 1% regions in transition when balancing. Then the cluster's availability is at least 99% when balancing.
+**Default:** `1.0`
+
+#### `hbase.balancer.period` [!toc]
+
+**Description:** Period at which the region balancer runs in the Master, in milliseconds.
+**Default:** `300000`
+
+#### `hbase.master.oldwals.dir.updater.period` [!toc]
+
+**Description:** Period at which the oldWALs directory size calculator/updater will run in the Master, in milliseconds.
+**Default:** `300000`
+
+#### `hbase.regions.slop` [!toc]
+
+**Description:** The load balancer can trigger for several reasons. This value controls one of those reasons. Run the balancer if any regionserver has a region count outside the range of average +/- (average * slop) regions. If the value of slop is negative, disable sloppiness checks. The balancer can still run for other reasons, but sloppiness will not be one of them. If the value of slop is 0, run the balancer if any server has a region count more than 1 from the average. If the value of slop is 100, run the balancer if any server has a region count greater than 101 times the average. The default value of this parameter is 0.2, which runs the balancer if any server has a region count less than 80% of the average, or greater than 120% of the average. Note that for the default StochasticLoadBalancer, this does not guarantee any balancing actions will be taken, but only that the balancer will attempt to run.
+**Default:** `0.2`
+
+#### `hbase.normalizer.period` [!toc]
+
+**Description:** Period at which the region normalizer runs in the Master, in milliseconds.
+**Default:** `300000`
+
+#### `hbase.normalizer.split.enabled` [!toc]
+
+**Description:** Whether to split a region as part of normalization.
+**Default:** `true`
+
+#### `hbase.normalizer.merge.enabled` [!toc]
+
+**Description:** Whether to merge a region as part of normalization.
+**Default:** `true`
+
+#### `hbase.normalizer.merge.min.region.count` [!toc]
+
+**Description:** The minimum number of regions in a table to consider it for merge normalization.
+**Default:** `3`
+
+#### `hbase.normalizer.merge.min_region_age.days` [!toc]
+
+**Description:** The minimum age for a region to be considered for a merge, in days.
+**Default:** `3`
+
+#### `hbase.normalizer.merge.min_region_size.mb` [!toc]
+
+**Description:** The minimum size for a region to be considered for a merge, in whole MBs.
+**Default:** `1`
+
+#### `hbase.normalizer.merge.merge_request_max_number_of_regions` [!toc]
+
+**Description:** The maximum number of region count in a merge request for merge normalization.
+**Default:** `100`
+
+#### `hbase.table.normalization.enabled` [!toc]
+
+**Description:** This config is used to set default behaviour of normalizer at table level. To override this at table level one can set NORMALIZATION_ENABLED at table descriptor level and that property will be honored
+**Default:** `false`
+
+#### `hbase.server.thread.wakefrequency` [!toc]
+
+**Description:** In master side, this config is the period used for FS related behaviors: checking if hdfs is out of safe mode, setting or checking hbase.version file, setting or checking hbase.id file. Using default value should be fine. In regionserver side, this config is used in several places: flushing check interval, compaction check interval, wal rolling check interval. Specially, admin can tune flushing and compaction check interval by hbase.regionserver.flush.check.period and hbase.regionserver.compaction.check.period. (in milliseconds)
+**Default:** `10000`
+
+#### `hbase.regionserver.flush.check.period` [!toc]
+
+**Description:** It determines the flushing check period of PeriodicFlusher in regionserver. If unset, it uses hbase.server.thread.wakefrequency as default value. (in milliseconds)
+**Default:** `${hbase.server.thread.wakefrequency}`
+
+#### `hbase.regionserver.compaction.check.period` [!toc]
+
+**Description:** It determines the compaction check period of CompactionChecker in regionserver. If unset, it uses hbase.server.thread.wakefrequency as default value. (in milliseconds)
+**Default:** `${hbase.server.thread.wakefrequency}`
+
+#### `hbase.server.versionfile.writeattempts` [!toc]
+
+**Description:** How many times to retry attempting to write a version file before just aborting. Each attempt is separated by the hbase.server.thread.wakefrequency milliseconds.
+**Default:** `3`
+
+#### `hbase.hregion.memstore.flush.size` [!toc]
+
+**Description:** Memstore will be flushed to disk if size of the memstore exceeds this number of bytes. Value is checked by a thread that runs every hbase.server.thread.wakefrequency.
+**Default:** `134217728`
+
+#### `hbase.hregion.percolumnfamilyflush.size.lower.bound.min` [!toc]
+
+**Description:** If FlushLargeStoresPolicy is used and there are multiple column families, then every time that we hit the total memstore limit, we find out all the column families whose memstores exceed a "lower bound" and only flush them while retaining the others in memory. The "lower bound" will be "hbase.hregion.memstore.flush.size / column_family_number" by default unless value of this property is larger than that. If none of the families have their memstore size more than lower bound, all the memstores will be flushed (just as usual).
+**Default:** `16777216`
+
+#### `hbase.hregion.preclose.flush.size` [!toc]
+
+**Description:** If the memstores in a region are this size or larger when we go to close, run a "pre-flush" to clear out memstores before we put up the region closed flag and take the region offline. On close, a flush is run under the close flag to empty memory. During this time the region is offline and we are not taking on any writes. If the memstore content is large, this flush could take a long time to complete. The preflush is meant to clean out the bulk of the memstore before putting up the close flag and taking the region offline so the flush that runs under the close flag has little to do.
+**Default:** `5242880`
+
+#### `hbase.hregion.memstore.block.multiplier` [!toc]
+
+**Description:** Block updates if memstore has hbase.hregion.memstore.block.multiplier times hbase.hregion.memstore.flush.size bytes. Useful preventing runaway memstore during spikes in update traffic. Without an upper-bound, memstore fills such that when it flushes the resultant flush files take a long time to compact or split, or worse, we OOME.
+**Default:** `4`
+
+#### `hbase.hregion.memstore.mslab.enabled` [!toc]
+
+**Description:** Enables the MemStore-Local Allocation Buffer, a feature which works to prevent heap fragmentation under heavy write loads. This can reduce the frequency of stop-the-world GC pauses on large heaps.
+**Default:** `true`
+
+#### `hbase.hregion.memstore.mslab.chunksize` [!toc]
+
+**Description:** The maximum byte size of a chunk in the MemStoreLAB. Unit: bytes
+**Default:** `2097152`
+
+#### `hbase.regionserver.offheap.global.memstore.size` [!toc]
+
+**Description:** The amount of off-heap memory all MemStores in a RegionServer may use. A value of 0 means that no off-heap memory will be used and all chunks in MSLAB will be HeapByteBuffer, otherwise the non-zero value means how many megabyte of off-heap memory will be used for chunks in MSLAB and all chunks in MSLAB will be DirectByteBuffer. Unit: megabytes.
+**Default:** `0`
+
+#### `hbase.hregion.memstore.mslab.max.allocation` [!toc]
+
+**Description:** The maximal size of one allocation in the MemStoreLAB, if the desired byte size exceed this threshold then it will be just allocated from JVM heap rather than MemStoreLAB.
+**Default:** `262144`
+
+#### `hbase.hregion.max.filesize` [!toc]
+
+**Description:** Maximum file size. If the sum of the sizes of a region's HFiles has grown to exceed this value, the region is split in two. There are two choices of how this option works: the first is when any store's size exceeds the threshold then split, and the other is when the overall region's size exceeds the threshold then split; it can be configured by hbase.hregion.split.overallfiles.
+**Default:** `10737418240`
+
+#### `hbase.hregion.split.overallfiles` [!toc]
+
+**Description:** Whether to sum the overall region file size when checking whether to split.
+**Default:** `true`
+
+#### `hbase.hregion.majorcompaction` [!toc]
+
+**Description:** Time between major compactions, expressed in milliseconds. Set to 0 to disable time-based automatic major compactions. User-requested and size-based major compactions will still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause compaction to start at a somewhat-random time during a given window of time. The default value is 7 days, expressed in milliseconds. If major compactions are causing disruption in your environment, you can configure them to run at off-peak times for your deployment, or disable time-based major compactions by setting this parameter to 0, and run major compactions in a cron job or by another external mechanism.
+**Default:** `604800000`
+
+#### `hbase.hregion.majorcompaction.jitter` [!toc]
+
+**Description:** A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number, the closer the compactions will happen to the hbase.hregion.majorcompaction interval.
+**Default:** `0.50`
+
+#### `hbase.hstore.compactionThreshold` [!toc]
+
+**Description:** If more than or equal to this number of StoreFiles exist in any one Store (one StoreFile is written per flush of MemStore), a compaction is run to rewrite all StoreFiles into a single StoreFile. Larger values delay compaction, but when compaction does occur, it takes longer to complete.
+**Default:** `3`
+
+#### `hbase.regionserver.compaction.enabled` [!toc]
+
+**Description:** Enable/disable compactions by setting true/false. We can further switch compactions dynamically with the compaction_switch shell command.
+**Default:** `true`
+
+#### `hbase.hstore.flusher.count` [!toc]
+
+**Description:** The number of flush threads. With fewer threads, the MemStore flushes will be queued. With more threads, the flushes will be executed in parallel, increasing the load on HDFS, and potentially causing more compactions.
+**Default:** `2`
+
+#### `hbase.hstore.blockingStoreFiles` [!toc]
+
+**Description:** If more than this number of StoreFiles exist in any one Store (one StoreFile is written per flush of MemStore), updates are blocked for this region until a compaction is completed, or until hbase.hstore.blockingWaitTime has been exceeded.
+**Default:** `16`
+
+#### `hbase.hstore.blockingWaitTime` [!toc]
+
+**Description:** The time for which a region will block updates after reaching the StoreFile limit defined by hbase.hstore.blockingStoreFiles. After this time has elapsed, the region will stop blocking updates even if a compaction has not been completed.
+**Default:** `90000`
+
+#### `hbase.hstore.compaction.min` [!toc]
+
+**Description:** The minimum number of StoreFiles which must be eligible for compaction before compaction can run. The goal of tuning hbase.hstore.compaction.min is to avoid ending up with too many tiny StoreFiles to compact. Setting this value to 2 would cause a minor compaction each time you have two StoreFiles in a Store, and this is probably not appropriate. If you set this value too high, all the other values will need to be adjusted accordingly. For most cases, the default value is appropriate (the empty value here results in 3 by code logic). In previous versions of HBase, the parameter hbase.hstore.compaction.min was named hbase.hstore.compactionThreshold.
+**Default:** `(empty)`
+
+#### `hbase.hstore.compaction.max` [!toc]
+
+**Description:** The maximum number of StoreFiles which will be selected for a single minor compaction, regardless of the number of eligible StoreFiles. Effectively, the value of hbase.hstore.compaction.max controls the length of time it takes a single compaction to complete. Setting it larger means that more StoreFiles are included in a compaction. For most cases, the default value is appropriate.
+**Default:** `10`
+
+#### `hbase.hstore.compaction.min.size` [!toc]
+
+**Description:** A StoreFile (or a selection of StoreFiles, when using ExploringCompactionPolicy) smaller than this size will always be eligible for minor compaction. HFiles this size or larger are evaluated by hbase.hstore.compaction.ratio to determine if they are eligible. Because this limit represents the "automatic include" limit for all StoreFiles smaller than this value, this value may need to be reduced in write-heavy environments where many StoreFiles in the 1-2 MB range are being flushed, because every StoreFile will be targeted for compaction and the resulting StoreFiles may still be under the minimum size and require further compaction. If this parameter is lowered, the ratio check is triggered more quickly. This addressed some issues seen in earlier versions of HBase but changing this parameter is no longer necessary in most situations. Default: 128 MB expressed in bytes.
+**Default:** `134217728`
+
+#### `hbase.hstore.compaction.max.size` [!toc]
+
+**Description:** A StoreFile (or a selection of StoreFiles, when using ExploringCompactionPolicy) larger than this size will be excluded from compaction. The effect of raising hbase.hstore.compaction.max.size is fewer, larger StoreFiles that do not get compacted often. If you feel that compaction is happening too often without much benefit, you can try raising this value. Default: the value of LONG.MAX_VALUE, expressed in bytes.
+**Default:** `9223372036854775807`
+
+#### `hbase.hstore.compaction.ratio` [!toc]
+
+**Description:** For minor compaction, this ratio is used to determine whether a given StoreFile which is larger than hbase.hstore.compaction.min.size is eligible for compaction. Its effect is to limit compaction of large StoreFiles. The value of hbase.hstore.compaction.ratio is expressed as a floating-point decimal. A large ratio, such as 10, will produce a single giant StoreFile. Conversely, a low value, such as .25, will produce behavior similar to the BigTable compaction algorithm, producing four StoreFiles. A moderate value of between 1.0 and 1.4 is recommended. When tuning this value, you are balancing write costs with read costs. Raising the value (to something like 1.4) will have more write costs, because you will compact larger StoreFiles. However, during reads, HBase will need to seek through fewer StoreFiles to accomplish the read. Consider this approach if you cannot take advantage of Bloom filters. Otherwise, you can lower this value to something like 1.0 to reduce the background cost of writes, and use Bloom filters to control the number of StoreFiles touched during reads. For most cases, the default value is appropriate.
+**Default:** `1.2F`
+
+#### `hbase.hstore.compaction.ratio.offpeak` [!toc]
+
+**Description:** Allows you to set a different (by default, more aggressive) ratio for determining whether larger StoreFiles are included in compactions during off-peak hours. Works in the same way as hbase.hstore.compaction.ratio. Only applies if hbase.offpeak.start.hour and hbase.offpeak.end.hour are also enabled.
+**Default:** `5.0F`
+
+#### `hbase.hstore.time.to.purge.deletes` [!toc]
+
+**Description:** The amount of time to delay purging of delete markers with future timestamps. If unset, or set to 0, all delete markers, including those with future timestamps, are purged during the next major compaction. Otherwise, a delete marker is kept until the major compaction which occurs after the marker's timestamp plus the value of this setting, in milliseconds.
+**Default:** `0`
+
+#### `hbase.offpeak.start.hour` [!toc]
+
+**Description:** The start of off-peak hours, expressed as an integer between 0 and 23, inclusive. Set to -1 to disable off-peak.
+**Default:** `-1`
+
+#### `hbase.offpeak.end.hour` [!toc]
+
+**Description:** The end of off-peak hours, expressed as an integer between 0 and 23, inclusive. Set to -1 to disable off-peak.
+**Default:** `-1`
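+
+For example, the following sketch defines a hypothetical off-peak window from midnight to 06:00 and sets the off-peak compaction ratio explicitly; the hours are illustrative and should match your own low-traffic period:
+
+```xml
+<!-- Hypothetical off-peak window from 00:00 to 06:00; adjust to your
+     cluster's low-traffic hours. -->
+<property>
+  <name>hbase.offpeak.start.hour</name>
+  <value>0</value>
+</property>
+<property>
+  <name>hbase.offpeak.end.hour</name>
+  <value>6</value>
+</property>
+<property>
+  <name>hbase.hstore.compaction.ratio.offpeak</name>
+  <value>5.0</value>
+</property>
+```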
+
+#### `hbase.regionserver.thread.compaction.throttle` [!toc]
+
+**Description:** There are two different thread pools for compactions, one for large compactions and the other for small compactions. This helps to keep compaction of lean tables (such as hbase:meta) fast. If a compaction is larger than this threshold, it goes into the large compaction pool. In most cases, the default value is appropriate. Default: 2 x hbase.hstore.compaction.max x hbase.hregion.memstore.flush.size (which defaults to 128MB). The value field assumes that the value of hbase.hregion.memstore.flush.size is unchanged from the default.
+**Default:** `2684354560`
+
+#### `hbase.regionserver.majorcompaction.pagecache.drop` [!toc]
+
+**Description:** Specifies whether to drop pages read/written into the system page cache by major compactions. Setting it to true helps prevent major compactions from polluting the page cache, which is almost always required, especially for clusters with low/moderate memory to storage ratio.
+**Default:** `true`
+
+#### `hbase.regionserver.minorcompaction.pagecache.drop` [!toc]
+
+**Description:** Specifies whether to drop pages read/written into the system page cache by minor compactions. Setting it to true helps prevent minor compactions from polluting the page cache, which is most beneficial on clusters with low memory to storage ratio or very write heavy clusters. You may want to set it to false under moderate to low write workload when bulk of the reads are on the most recently written data.
+**Default:** `true`
+
+#### `hbase.hstore.compaction.kv.max` [!toc]
+
+**Description:** The maximum number of KeyValues to read and then write in a batch when flushing or compacting. Set this lower if you have big KeyValues and problems with Out Of Memory Exceptions. Set this higher if you have wide, small rows.
+**Default:** `10`
+
+#### `hbase.storescanner.parallel.seek.enable` [!toc]
+
+**Description:** Enables StoreFileScanner parallel-seeking in StoreScanner, a feature which can reduce response latency under special conditions.
+**Default:** `false`
+
+#### `hbase.storescanner.parallel.seek.threads` [!toc]
+
+**Description:** The default thread pool size if the parallel-seeking feature is enabled.
+**Default:** `10`
+
+#### `hfile.block.cache.policy` [!toc]
+
+**Description:** The eviction policy for the L1 block cache (LRU or TinyLFU).
+**Default:** `LRU`
+
+#### `hfile.block.cache.size` [!toc]
+
+**Description:** Percentage of maximum heap (-Xmx setting) to allocate to block cache used by a StoreFile. Default of 0.4 means allocate 40%. Set to 0 to disable but it's not recommended; you need at least enough cache to hold the storefile indices.
+**Default:** `0.4`
+
+#### `hfile.block.cache.memory.size` [!toc]
+
+**Description:** Defines the maximum heap memory allocated for the HFile block cache, specified in bytes or human-readable formats like '10m' for megabytes or '10g' for gigabytes. This configuration allows setting an absolute memory size instead of a percentage of the maximum heap. Takes precedence over hfile.block.cache.size if both are specified.
+**Default:** `(empty)`
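+
+For instance, to cap the on-heap block cache at an absolute size instead of a heap percentage, a sketch like the following could be used (the 4g figure is purely illustrative):
+
+```xml
+<!-- Illustrative: cap the L1 block cache at 4 GB regardless of -Xmx. -->
+<property>
+  <name>hfile.block.cache.memory.size</name>
+  <value>4g</value>
+</property>
+```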
+
+#### `hfile.block.index.cacheonwrite` [!toc]
+
+**Description:** This allows non-root multi-level index blocks to be put into the block cache at the time the index is being written.
+**Default:** `false`
+
+#### `hfile.index.block.max.size` [!toc]
+
+**Description:** When the size of a leaf-level, intermediate-level, or root-level index block in a multi-level block index grows to this size, the block is written out and a new block is started.
+**Default:** `131072`
+
+#### `hbase.bucketcache.ioengine` [!toc]
+
+**Description:** Where to store the contents of the bucketcache. One of: offheap, file, files, mmap or pmem. If a file or files, set it to file(s):PATH_TO_FILE. mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE. 'pmem' is bucket cache over a file on the persistent memory device. Use pmem:PATH_TO_FILE. See http://hbase.apache.org/book.html#offheap.blockcache for more information.
+**Default:** `(empty)`
+
+#### `hbase.hstore.compaction.throughput.lower.bound` [!toc]
+
+**Description:** The target lower bound on aggregate compaction throughput, in bytes/sec. Allows you to tune the minimum available compaction throughput when the PressureAwareCompactionThroughputController throughput controller is active. (It is active by default.)
+**Default:** `52428800`
+
+#### `hbase.hstore.compaction.throughput.higher.bound` [!toc]
+
+**Description:** The target upper bound on aggregate compaction throughput, in bytes/sec. Allows you to control aggregate compaction throughput demand when the PressureAwareCompactionThroughputController throughput controller is active. (It is active by default.) The maximum throughput will be tuned between the lower and upper bounds when compaction pressure is within the range [0.0, 1.0]. If compaction pressure is 1.0 or greater the higher bound will be ignored until pressure returns to the normal range.
+**Default:** `104857600`
+
+#### `hbase.bucketcache.size` [!toc]
+
+**Description:** The total capacity of the BucketCache, in megabytes. Default: 0.0
+**Default:** `(empty)`
+
+#### `hbase.bucketcache.bucket.sizes` [!toc]
+
+**Description:** A comma-separated list of sizes for buckets for the bucketcache. Can be multiple sizes. List block sizes in order from smallest to largest. The sizes you use will depend on your data access patterns. Must be a multiple of 256, or else you will run into 'java.io.IOException: Invalid HFile block magic' when you go to read from cache. If you specify no values here, then you pick up the default bucket sizes set in code (see BucketAllocator#DEFAULT_BUCKET_SIZES).
+**Default:** `(empty)`
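+
+As an illustration, a minimal off-heap BucketCache setup might combine the ioengine and size settings as below; the 8 GB capacity is an assumption, and the JVM's direct memory limit (for example -XX:MaxDirectMemorySize) must be sized to accommodate it:
+
+```xml
+<!-- Illustrative: 8 GB off-heap BucketCache as an L2 cache. -->
+<property>
+  <name>hbase.bucketcache.ioengine</name>
+  <value>offheap</value>
+</property>
+<property>
+  <name>hbase.bucketcache.size</name>
+  <value>8192</value> <!-- megabytes -->
+</property>
+```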
+
+#### `hfile.format.version` [!toc]
+
+**Description:** The HFile format version to use for new files. Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags). Also see the configuration 'hbase.replication.rpc.codec'.
+**Default:** `3`
+
+#### `hfile.block.bloom.cacheonwrite` [!toc]
+
+**Description:** Enables cache-on-write for inline blocks of a compound Bloom filter.
+**Default:** `false`
+
+#### `io.storefile.bloom.block.size` [!toc]
+
+**Description:** The size in bytes of a single block ("chunk") of a compound Bloom filter. This size is approximate, because Bloom blocks can only be inserted at data block boundaries, and the number of keys per data block varies.
+**Default:** `131072`
+
+#### `hbase.rs.cacheblocksonwrite` [!toc]
+
+**Description:** Whether an HFile block should be added to the block cache when the block is finished.
+**Default:** `false`
+
+#### `hbase.rpc.timeout` [!toc]
+
+**Description:** This defines, for the RPC layer, how long (in milliseconds) an HBase client application waits for a remote call before it times out. It uses pings to check connections but will eventually throw a TimeoutException.
+**Default:** `60000`
+
+#### `hbase.client.operation.timeout` [!toc]
+
+**Description:** Operation timeout is a top-level restriction (in milliseconds) that makes sure a blocking operation in Table will not be blocked longer than this. In each operation, if an RPC request fails because of a timeout or another reason, it will retry until it succeeds or throws RetriesExhaustedException. But if the total blocking time reaches the operation timeout before the retries are exhausted, it breaks early and throws SocketTimeoutException.
+**Default:** `1200000`
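+
+For example, a client-side _hbase-site.xml_ sketch that allows individual RPCs up to 90 seconds while capping the whole blocking operation at 10 minutes (values illustrative only) could look like:
+
+```xml
+<!-- Illustrative client-side timeouts. -->
+<property>
+  <name>hbase.rpc.timeout</name>
+  <value>90000</value> <!-- 90 seconds per RPC attempt -->
+</property>
+<property>
+  <name>hbase.client.operation.timeout</name>
+  <value>600000</value> <!-- 10 minutes per blocking operation -->
+</property>
+```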
+
+#### `hbase.client.connection.metacache.invalidate-interval.ms` [!toc]
+
+**Description:** Interval in milliseconds for checking and invalidating the meta cache when a table is disabled or dropped. Setting it to zero disables the check. Setting it to 24h or a higher value is suggested, because disabling or deleting tables usually does not happen very frequently.
+**Default:** `0`
+
+#### `hbase.cells.scanned.per.heartbeat.check` [!toc]
+
+**Description:** The number of cells scanned in between heartbeat checks. Heartbeat checks occur during the processing of scans to determine whether or not the server should stop scanning in order to send back a heartbeat message to the client. Heartbeat messages are used to keep the client-server connection alive during long running scans. Small values mean that the heartbeat checks will occur more often and thus will provide a tighter bound on the execution time of the scan. Larger values mean that the heartbeat checks occur less frequently.
+**Default:** `10000`
+
+#### `hbase.rpc.shortoperation.timeout` [!toc]
+
+**Description:** This is another version of "hbase.rpc.timeout". For RPC operations within the cluster, we rely on this configuration to set a short timeout limit for short operations. For example, a short RPC timeout for a region server trying to report to the active master can speed up the master failover process.
+**Default:** `10000`
+
+#### `hbase.ipc.client.tcpnodelay` [!toc]
+
+**Description:** Set no delay on rpc socket connections. See http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay()
+**Default:** `true`
+
+#### `hbase.unsafe.regionserver.hostname` [!toc]
+
+**Description:** This config is for experts: don't set its value unless you really know what you are doing. When set to a non-empty value, this represents the (external facing) hostname for the underlying server. See https://issues.apache.org/jira/browse/HBASE-12954 for details.
+**Default:** `(empty)`
+
+#### `hbase.unsafe.regionserver.hostname.disable.master.reversedns` [!toc]
+
+**Description:** This config is for experts: don't set its value unless you really know what you are doing. When set to true, regionserver will use the current node hostname for the servername and HMaster will skip reverse DNS lookup and use the hostname sent by regionserver instead. Note that this config and hbase.unsafe.regionserver.hostname are mutually exclusive. See https://issues.apache.org/jira/browse/HBASE-18226 for more details.
+**Default:** `false`
+
+#### `hbase.master.keytab.file` [!toc]
+
+**Description:** Full path to the kerberos keytab file to use for logging in the configured HMaster server principal.
+**Default:** `(empty)`
+
+#### `hbase.master.kerberos.principal` [!toc]
+
+**Description:** Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name that should be used to run the HMaster process. The principal name should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname portion, it will be replaced with the actual hostname of the running instance.
+**Default:** `(empty)`
+
+#### `hbase.regionserver.keytab.file` [!toc]
+
+**Description:** Full path to the kerberos keytab file to use for logging in the configured HRegionServer server principal.
+**Default:** `(empty)`
+
+#### `hbase.regionserver.kerberos.principal` [!toc]
+
+**Description:** Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name that should be used to run the HRegionServer process. The principal name should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname portion, it will be replaced with the actual hostname of the running instance. An entry for this principal must exist in the file specified in hbase.regionserver.keytab.file
+**Default:** `(empty)`
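+
+In a Kerberos-secured cluster, the RegionServer principal and keytab settings typically appear together with hbase.security.authentication (described later on this page). The keytab path and realm below are placeholders for illustration:
+
+```xml
+<!-- Placeholder keytab path and realm; substitute your own. -->
+<property>
+  <name>hbase.security.authentication</name>
+  <value>kerberos</value>
+</property>
+<property>
+  <name>hbase.regionserver.kerberos.principal</name>
+  <value>hbase/_HOST@EXAMPLE.COM</value>
+</property>
+<property>
+  <name>hbase.regionserver.keytab.file</name>
+  <value>/etc/hbase/conf/hbase.keytab</value>
+</property>
+```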
+
+#### `hadoop.policy.file` [!toc]
+
+**Description:** The policy configuration file used by RPC servers to make authorization decisions on client requests. Only used when HBase security is enabled.
+**Default:** `hbase-policy.xml`
+
+#### `hbase.superuser` [!toc]
+
+**Description:** List of users or groups (comma-separated), who are allowed full privileges, regardless of stored ACLs, across the cluster. Only used when HBase security is enabled. Group names should be prefixed with "@".
+**Default:** `(empty)`
+
+#### `hbase.auth.key.update.interval` [!toc]
+
+**Description:** The update interval for master key for authentication tokens in servers in milliseconds. Only used when HBase security is enabled.
+**Default:** `86400000`
+
+#### `hbase.auth.token.max.lifetime` [!toc]
+
+**Description:** The maximum lifetime in milliseconds after which an authentication token expires. Only used when HBase security is enabled.
+**Default:** `604800000`
+
+#### `hbase.ipc.client.fallback-to-simple-auth-allowed` [!toc]
+
+**Description:** When a client is configured to attempt a secure connection, but attempts to connect to an insecure server, that server may instruct the client to switch to SASL SIMPLE (unsecure) authentication. This setting controls whether or not the client will accept this instruction from the server. When false (the default), the client will not allow the fallback to SIMPLE authentication, and will abort the connection.
+**Default:** `false`
+
+#### `hbase.ipc.server.fallback-to-simple-auth-allowed` [!toc]
+
+**Description:** When a server is configured to require secure connections, it will reject connection attempts from clients using SASL SIMPLE (unsecure) authentication. This setting allows secure servers to accept SASL SIMPLE connections from clients when the client requests. When false (the default), the server will not allow the fallback to SIMPLE authentication, and will reject the connection. WARNING: This setting should ONLY be used as a temporary measure while converting clients over to secure authentication. It MUST BE DISABLED for secure operation.
+**Default:** `false`
+
+#### `hbase.unsafe.client.kerberos.hostname.disable.reversedns` [!toc]
+
+**Description:** This config is for experts: don't set its value unless you really know what you are doing. When set to true, HBase client using SASL Kerberos will skip reverse DNS lookup and use provided hostname of the destination for the principal instead. See https://issues.apache.org/jira/browse/HBASE-25665 for more details.
+**Default:** `false`
+
+#### `hbase.display.keys` [!toc]
+
+**Description:** When this is set to true the webUI and such will display all start/end keys as part of the table details, region names, etc. When this is set to false, the keys are hidden.
+**Default:** `true`
+
+#### `hbase.coprocessor.enabled` [!toc]
+
+**Description:** Enables or disables coprocessor loading. If 'false' (disabled), any other coprocessor related configuration will be ignored.
+**Default:** `true`
+
+#### `hbase.coprocessor.user.enabled` [!toc]
+
+**Description:** Enables or disables user (aka. table) coprocessor loading. If 'false' (disabled), any table coprocessor attributes in table descriptors will be ignored. If "hbase.coprocessor.enabled" is 'false' this setting has no effect.
+**Default:** `true`
+
+#### `hbase.coprocessor.region.classes` [!toc]
+
+**Description:** A comma-separated list of region observer or endpoint coprocessors that are loaded by default on all tables. For any override coprocessor method, these classes will be called in order. After implementing your own Coprocessor, add it to HBase's classpath and add the fully qualified class name here. A coprocessor can also be loaded on demand by setting it in the HTableDescriptor or via the HBase shell.
+**Default:** `(empty)`
+
+#### `hbase.coprocessor.master.classes` [!toc]
+
+**Description:** A comma-separated list of org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are loaded by default on the active HMaster process. For any implemented coprocessor methods, the listed classes will be called in order. After implementing your own MasterObserver, just put it in HBase's classpath and add the fully qualified class name here.
+**Default:** `(empty)`
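+
+For illustration, registering system-wide observers might look like the sketch below; com.example.MyRegionObserver and com.example.MyMasterObserver are hypothetical class names that would need to exist on HBase's classpath:
+
+```xml
+<!-- Hypothetical coprocessor classes; they must be on HBase's classpath. -->
+<property>
+  <name>hbase.coprocessor.region.classes</name>
+  <value>com.example.MyRegionObserver</value>
+</property>
+<property>
+  <name>hbase.coprocessor.master.classes</name>
+  <value>com.example.MyMasterObserver</value>
+</property>
+```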
+
+#### `hbase.coprocessor.abortonerror` [!toc]
+
+**Description:** Set to true to cause the hosting server (master or regionserver) to abort if a coprocessor fails to load, fails to initialize, or throws an unexpected Throwable object. Setting this to false will allow the server to continue execution but the system wide state of the coprocessor in question will become inconsistent as it will be properly executing in only a subset of servers, so this is most useful for debugging only.
+**Default:** `true`
+
+#### `hbase.rest.port` [!toc]
+
+**Description:** The port for the HBase REST server.
+**Default:** `8080`
+
+#### `hbase.rest.readonly` [!toc]
+
+**Description:** Defines the mode the REST server will be started in. Possible values are: false: All HTTP methods are permitted - GET/PUT/POST/DELETE. true: Only the GET method is permitted.
+**Default:** `false`
+
+#### `hbase.rest.threads.max` [!toc]
+
+**Description:** The maximum number of threads of the REST server thread pool. Threads in the pool are reused to process REST requests. This controls the maximum number of requests processed concurrently. It may help to control the memory used by the REST server to avoid OOM issues. If the thread pool is full, incoming requests will be queued up and wait for some free threads.
+**Default:** `100`
+
+#### `hbase.rest.threads.min` [!toc]
+
+**Description:** The minimum number of threads of the REST server thread pool. The thread pool always has at least this number of threads so the REST server is ready to serve incoming requests.
+**Default:** `2`
+
+#### `hbase.rest.support.proxyuser` [!toc]
+
+**Description:** Enables running the REST server to support proxy-user mode.
+**Default:** `false`
+
+#### `hbase.defaults.for.version.skip` [!toc]
+
+**Description:** Set to true to skip the 'hbase.defaults.for.version' check. Setting this to true can be useful in contexts other than the other side of a maven generation; i.e. running in an IDE. You'll want to set this boolean to true to avoid seeing the RuntimeException complaint: "hbase-default.xml file seems to be for an old version of HBase (\${hbase.version}), this version is X.X.X-SNAPSHOT"
+**Default:** `false`
+
+#### `hbase.table.lock.enable` [!toc]
+
+**Description:** Set to true to enable locking the table in zookeeper for schema change operations. Table locking from master prevents concurrent schema modifications to corrupt table state.
+**Default:** `true`
+
+#### `hbase.table.max.rowsize` [!toc]
+
+**Description:** Maximum size of a single row in bytes (default is 1 GB) for Gets or Scans without the in-row scan flag set. If the row size exceeds this limit, a RowTooBigException is thrown to the client.
+**Default:** `1073741824`
+
+#### `hbase.thrift.minWorkerThreads` [!toc]
+
+**Description:** The "core size" of the thread pool. New threads are created on every connection until this many threads are created.
+**Default:** `16`
+
+#### `hbase.thrift.maxWorkerThreads` [!toc]
+
+**Description:** The maximum size of the thread pool. When the pending request queue overflows, new threads are created until their number reaches this number. After that, the server starts dropping connections.
+**Default:** `1000`
+
+#### `hbase.thrift.maxQueuedRequests` [!toc]
+
+**Description:** The maximum number of pending Thrift connections waiting in the queue. If there are no idle threads in the pool, the server queues requests. Only when the queue overflows, new threads are added, up to hbase.thrift.maxQueuedRequests threads.
+**Default:** `1000`
+
+#### `hbase.regionserver.thrift.framed` [!toc]
+
+**Description:** Use Thrift TFramedTransport on the server side. This is the recommended transport for thrift servers and requires a similar setting on the client side. Changing this to false will select the default transport, vulnerable to DoS when malformed requests are issued due to THRIFT-601.
+**Default:** `false`
+
+#### `hbase.regionserver.thrift.framed.max_frame_size_in_mb` [!toc]
+
+**Description:** Default frame size when using framed transport, in MB
+**Default:** `2`
+
+#### `hbase.regionserver.thrift.compact` [!toc]
+
+**Description:** Use Thrift TCompactProtocol binary serialization protocol.
+**Default:** `false`
+
+#### `hbase.rootdir.perms` [!toc]
+
+**Description:** FS Permissions for the root data subdirectory in a secure (kerberos) setup. When the master starts, it creates the rootdir with these permissions, or sets the permissions if they do not match.
+**Default:** `700`
+
+#### `hbase.wal.dir.perms` [!toc]
+
+**Description:** FS Permissions for the root WAL directory in a secure (kerberos) setup. When the master starts, it creates the WAL dir with these permissions, or sets the permissions if they do not match.
+**Default:** `700`
+
+#### `hbase.data.umask.enable` [!toc]
+
+**Description:** If true, file permissions will be assigned to the files written by the regionserver.
+**Default:** `false`
+
+#### `hbase.data.umask` [!toc]
+
+**Description:** File permissions that should be used to write data files when hbase.data.umask.enable is true
+**Default:** `000`
+
+#### `hbase.snapshot.enabled` [!toc]
+
+**Description:** Set to true to allow snapshots to be taken / restored / cloned.
+**Default:** `true`
+
+#### `hbase.snapshot.restore.take.failsafe.snapshot` [!toc]
+
+**Description:** Set to true to take a snapshot before the restore operation. The snapshot taken will be used in case of failure, to restore the previous state. At the end of the restore operation this snapshot will be deleted.
+**Default:** `true`
+
+#### `hbase.snapshot.restore.failsafe.name` [!toc]
+
+**Description:** Name of the failsafe snapshot taken by the restore operation. You can use the {snapshot.name}, {table.name} and {restore.timestamp} variables to create a name based on what you are restoring.
+**Default:** `hbase-failsafe-{snapshot.name}-{restore.timestamp}`
+
+#### `hbase.snapshot.working.dir` [!toc]
+
+**Description:** Location where the snapshotting process will occur. The location of the completed snapshots will not change, but the temporary directory where the snapshot process occurs will be set to this location. This can be on a separate filesystem from the root directory, for performance reasons. See HBASE-21098 for more information.
+**Default:** `(empty)`
+
+#### `hbase.server.compactchecker.interval.multiplier` [!toc]
+
+**Description:** The number that determines how often we scan to see if compaction is necessary. Normally, compactions are done after some events (such as a memstore flush), but if a region didn't receive many writes for some time, or due to different compaction policies, it may be necessary to check it periodically. The interval between checks is hbase.server.compactchecker.interval.multiplier multiplied by hbase.server.thread.wakefrequency.
+**Default:** `1000`
+
+#### `hbase.lease.recovery.timeout` [!toc]
+
+**Description:** How long we wait on dfs lease recovery in total before giving up.
+**Default:** `900000`
+
+#### `hbase.lease.recovery.dfs.timeout` [!toc]
+
+**Description:** How long between dfs recover lease invocations. Should be larger than the sum of the time it takes for the namenode to issue a block recovery command as part of datanode heartbeat processing (dfs.heartbeat.interval) and the time it takes for the primary datanode performing block recovery to time out on a dead datanode (usually dfs.client.socket-timeout). See the end of HBASE-8389 for more.
+**Default:** `64000`
+
+#### `hbase.column.max.version` [!toc]
+
+**Description:** New column family descriptors will use this value as the default number of versions to keep.
+**Default:** `1`
+
+#### `dfs.client.read.shortcircuit` [!toc]
+
+**Description:** If set to true, this configuration parameter enables short-circuit local reads.
+**Default:** `(empty)`
+
+#### `dfs.domain.socket.path` [!toc]
+
+**Description:** This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients, if dfs.client.read.shortcircuit is set to true. If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode. Be careful about permissions for the directory that hosts the shared domain socket; the dfsclient will complain if it is open to users other than the HBase user.
+**Default:** `(empty)`
+
+#### `hbase.dfs.client.read.shortcircuit.buffer.size` [!toc]
+
+**Description:** If the DFSClient configuration dfs.client.read.shortcircuit.buffer.size is unset, we will use what is configured here as the short circuit read default direct byte buffer size. The DFSClient native default is 1MB; HBase keeps its HDFS files open, so the number of file blocks * 1MB soon starts to add up and threatens OOME because of a shortage of direct memory. So, we set it down from the default. Make it larger than the default hbase block size set in the HColumnDescriptor, which is usually 64k.
+**Default:** `131072`
+
+#### `hbase.regionserver.checksum.verify` [!toc]
+
+**Description:** If set to true (the default), HBase verifies the checksums for hfile blocks. HBase writes checksums inline with the data when it writes out hfiles. HDFS (as of this writing) writes checksums to a separate file from the data file, necessitating extra seeks. Setting this flag saves some I/O. Checksum verification by HDFS will be internally disabled on hfile streams when this flag is set. If the hbase-checksum verification fails, we will switch back to using HDFS checksums (so do not disable HDFS checksums! And besides, this feature applies to hfiles only, not to WALs). If this parameter is set to false, then hbase will not verify any checksums; instead it will depend on checksum verification being done in the HDFS client.
+**Default:** `true`
+
+#### `hbase.hstore.bytes.per.checksum` [!toc]
+
+**Description:** Number of bytes in a newly created checksum chunk for HBase-level checksums in hfile blocks.
+**Default:** `16384`
+
+#### `hbase.hstore.checksum.algorithm` [!toc]
+
+**Description:** Name of an algorithm that is used to compute checksums. Possible values are NULL, CRC32, CRC32C.
+**Default:** `CRC32C`
+
+#### `hbase.client.scanner.max.result.size` [!toc]
+
+**Description:** Maximum number of bytes returned when calling a scanner's next method. Note that when a single row is larger than this limit the row is still returned completely. The default value is 2MB, which is good for 1GbE networks. With faster and/or higher-latency networks this value should be increased.
+**Default:** `2097152`
+
+#### `hbase.server.scanner.max.result.size` [!toc]
+
+**Description:** Maximum number of bytes returned when calling a scanner's next method. Note that when a single row is larger than this limit the row is still returned completely. The default value is 100MB. This is a safety setting to protect the server from OOM situations.
+**Default:** `104857600`
+
+#### `hbase.status.published` [!toc]
+
+**Description:** This setting activates the publication by the master of the status of the region server. When a region server dies and its recovery starts, the master will push this information to the client application, to let them cut the connection immediately instead of waiting for a timeout.
+**Default:** `false`
+
+#### `hbase.status.publisher.class` [!toc]
+
+**Description:** Implementation of the status publication with a multicast message.
+**Default:** `org.apache.hadoop.hbase.master.ClusterStatusPublisher$MulticastPublisher`
+
+#### `hbase.status.listener.class` [!toc]
+
+**Description:** Implementation of the status listener with a multicast message.
+**Default:** `org.apache.hadoop.hbase.client.ClusterStatusListener$MulticastListener`
+
+#### `hbase.status.multicast.address.ip` [!toc]
+
+**Description:** Multicast address to use for the status publication by multicast.
+**Default:** `226.1.1.3`
+
+#### `hbase.status.multicast.address.port` [!toc]
+
+**Description:** Multicast port to use for the status publication by multicast.
+**Default:** `16100`
+
+#### `hbase.dynamic.jars.dir` [!toc]
+
+**Description:** The directory from which the custom filter JARs can be loaded dynamically by the region server without the need to restart. However, an already loaded filter/co-processor class would not be un-loaded. See HBASE-1936 for more details. Does not apply to coprocessors.
+**Default:** `${hbase.rootdir}/lib`
+
+#### `hbase.security.authentication` [!toc]
+
+**Description:** Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple' (no authentication), and 'kerberos'.
+**Default:** `simple`
+
+#### `hbase.rest.filter.classes` [!toc]
+
+**Description:** Servlet filters for REST service.
+**Default:** `org.apache.hadoop.hbase.rest.filter.GzipFilter`
+
+#### `hbase.master.loadbalancer.class` [!toc]
+
+**Description:** Class used to execute the region balancing when the period occurs. See the class comment for more on how it works: http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html It replaces the DefaultLoadBalancer as the default (since renamed as the SimpleLoadBalancer).
+**Default:** `org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer`
+
+#### `hbase.master.loadbalance.bytable` [!toc]
+
+**Description:** Whether to factor in the table name when the balancer runs, i.e. balance regions on a per-table basis. Default: false.
+**Default:** `false`
+
+#### `hbase.master.normalizer.class` [!toc]
+
+**Description:** Class used to execute the region normalization when the period occurs. See the class comment for more on how it works http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
+**Default:** `org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer`
+
+#### `hbase.rest.csrf.enabled` [!toc]
+
+**Description:** Set to true to enable protection against cross-site request forgery (CSRF)
+**Default:** `false`
+
+#### `hbase.rest-csrf.browser-useragents-regex` [!toc]
+
+**Description:** A comma-separated list of regular expressions used to match against an HTTP request's User-Agent header when protection against cross-site request forgery (CSRF) is enabled for REST server by setting hbase.rest.csrf.enabled to true. If the incoming User-Agent matches any of these regular expressions, then the request is considered to be sent by a browser, and therefore CSRF prevention is enforced. If the request's User-Agent does not match any of these regular expressions, then the request is considered to be sent by something other than a browser, such as scripted automation. In this case, CSRF is not a potential attack vector, so the prevention is not enforced. This helps achieve backwards-compatibility with existing automation that has not been updated to send the CSRF prevention header.
+**Default:** `^Mozilla.*,^Opera.*`
+
+#### `hbase.security.exec.permission.checks` [!toc]
+
+**Description:** If this setting is enabled and ACL based access control is active (the AccessController coprocessor is installed either as a system coprocessor or on a table as a table coprocessor) then you must grant all relevant users EXEC privilege if they require the ability to execute coprocessor endpoint calls. EXEC privilege, like any other permission, can be granted globally to a user, or to a user on a per table or per namespace basis. For more information on coprocessor endpoints, see the coprocessor section of the HBase online manual. For more information on granting or revoking permissions using the AccessController, see the security section of the HBase online manual.
+**Default:** `false`
+
+#### `hbase.procedure.regionserver.classes` [!toc]
+
+**Description:** A comma-separated list of org.apache.hadoop.hbase.procedure.RegionServerProcedureManager procedure managers that are loaded by default on the active HRegionServer process. The lifecycle methods (init/start/stop) will be called by the active HRegionServer process to perform the specific globally barriered procedure. After implementing your own RegionServerProcedureManager, just put it in HBase's classpath and add the fully qualified class name here.
+**Default:** `(empty)`
+
+#### `hbase.procedure.master.classes` [!toc]
+
+**Description:** A comma-separated list of org.apache.hadoop.hbase.procedure.MasterProcedureManager procedure managers that are loaded by default on the active HMaster process. A procedure is identified by its signature and users can use the signature and an instant name to trigger an execution of a globally barriered procedure. After implementing your own MasterProcedureManager, just put it in HBase's classpath and add the fully qualified class name here.
+**Default:** `(empty)`
+
+#### `hbase.coordinated.state.manager.class` [!toc]
+
+**Description:** Fully qualified name of class implementing coordinated state manager.
+**Default:** `org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager`
+
+#### `hbase.regionserver.storefile.refresh.period` [!toc]
+
+**Description:** The period (in milliseconds) for refreshing the store files for the secondary regions. 0 means this feature is disabled. Secondary regions see new files (from flushes and compactions) from the primary once the secondary region refreshes the list of files in the region (there is no notification mechanism). But too frequent refreshes might cause extra Namenode pressure. If the files cannot be refreshed for longer than the HFile TTL (hbase.master.hfilecleaner.ttl), the requests are rejected. Configuring the HFile TTL to a larger value is also recommended with this setting.
+**Default:** `0`
+
+#### `hbase.region.replica.replication.enabled` [!toc]
+
+**Description:** Whether asynchronous WAL replication to the secondary region replicas is enabled or not. We have a separate implementation for replicating the WAL without using the general inter-cluster replication framework, so we will not add any replication peers.
+**Default:** `false`
+
+#### `hbase.http.filter.initializers` [!toc]
+
+**Description:** A comma separated list of class names. Each class in the list must extend org.apache.hadoop.hbase.http.FilterInitializer. The corresponding Filter will be initialized. Then, the Filter will be applied to all user facing jsp and servlet web pages. The ordering of the list defines the ordering of the filters. The default StaticUserWebFilter adds a user principal as defined by the hbase.http.staticuser.user property.
+**Default:** `org.apache.hadoop.hbase.http.lib.StaticUserWebFilter`
+
+#### `hbase.security.visibility.mutations.checkauths` [!toc]
+
+**Description:** If enabled, this property will check whether the labels in the visibility expression are associated with the user issuing the mutation.
+**Default:** `false`
+
+#### `hbase.http.max.threads` [!toc]
+
+**Description:** The maximum number of threads that the HTTP Server will create in its ThreadPool.
+**Default:** `16`
+
+#### `hbase.http.metrics.servlets` [!toc]
+
+**Description:** Comma separated list of servlet names to enable for metrics collection. Supported servlets are jmx, metrics, prometheus
+**Default:** `jmx,metrics,prometheus`
+
+#### `hbase.replication.rpc.codec` [!toc]
+
+**Description:** The codec that is to be used when replication is enabled so that the tags are also replicated. This is used along with HFileV3 which supports tags in them. If tags are not used or if the hfile version used is HFileV2 then KeyValueCodec can be used as the replication codec. Note that using KeyValueCodecWithTags for replication when there are no tags causes no harm.
+**Default:** `org.apache.hadoop.hbase.codec.KeyValueCodecWithTags`
+
+#### `hbase.replication.source.maxthreads` [!toc]
+
+**Description:** The maximum number of threads any replication source will use for shipping edits to the sinks in parallel. This also limits the number of chunks each replication batch is broken into. Larger values can improve the replication throughput between the master and slave clusters. The default of 10 will rarely need to be changed.
+**Default:** `10`
+
+#### `hbase.http.staticuser.user` [!toc]
+
+**Description:** The user name to filter as, on static web filters while rendering content. An example use is the HDFS web UI (user to be used for browsing files).
+**Default:** `dr.stack`
+
+#### `hbase.regionserver.handler.abort.on.error.percent` [!toc]
+
+**Description:** The percent of region server RPC handler threads that must fail for the RegionServer to abort. -1: disable aborting; 0: abort if even a single handler has died; 0.x: abort only when this percent of handlers have died; 1: abort only when all of the handlers have died.
+**Default:** `0.5`
+
+#### `hbase.mob.file.cache.size` [!toc]
+
+**Description:** Number of opened file handlers to cache. A larger value will benefit reads by providing more file handlers per mob file cache and would reduce frequent file opening and closing. However, if this is set too high, it could lead to "too many opened file handlers" errors. The default value is 1000.
+**Default:** `1000`
+
+#### `hbase.mob.cache.evict.period` [!toc]
+
+**Description:** The amount of time in seconds before the mob cache evicts cached mob files. The default value is 3600 seconds.
+**Default:** `3600`
+
+#### `hbase.mob.cache.evict.remain.ratio` [!toc]
+
+**Description:** The ratio (between 0.0 and 1.0) of files that remains cached after an eviction is triggered when the number of cached mob files exceeds the hbase.mob.file.cache.size. The default value is 0.5f.
+**Default:** `0.5f`
+
+#### `hbase.master.mob.cleaner.period` [!toc]
+
+**Description:** The period that MobFileCleanerChore runs. The unit is seconds. The default value is one day. The MOB file name uses only the date part of the file creation time in it. We use this time for deciding TTL expiry of the files. So the removal of TTL expired files might be delayed. The max delay might be 24 hrs.
+**Default:** `86400`
+
+#### `hbase.mob.major.compaction.region.batch.size` [!toc]
+
+**Description:** The maximum number of MOB table regions that is allowed in a batch of mob compaction. By setting this number to a custom value, users can control the overall effect of a major compaction of a large MOB-enabled table. The default of 0 means no limit: all regions of a MOB table will be compacted at once.
+**Default:** `0`
+
+#### `hbase.mob.compaction.chore.period` [!toc]
+
+**Description:** The period that MobCompactionChore runs. The unit is seconds. The default value is one week.
+**Default:** `604800`
+
+#### `hbase.snapshot.master.timeout.millis` [!toc]
+
+**Description:** Timeout for master for the snapshot procedure execution.
+**Default:** `300000`
+
+#### `hbase.snapshot.region.timeout` [!toc]
+
+**Description:** Timeout for regionservers to keep threads in snapshot request pool waiting.
+**Default:** `300000`
+
+#### `hbase.rpc.rows.warning.threshold` [!toc]
+
+**Description:** Number of rows in a batch operation above which a warning will be logged. If hbase.client.write.buffer.maxmutations is not set, this will be used as fallback for that setting.
+**Default:** `5000`
+
+#### `hbase.master.wait.on.service.seconds` [!toc]
+
+**Description:** Default is 5 minutes. Make it 30 seconds for tests. See HBASE-19794 for some context.
+**Default:** `30`
+
+#### `hbase.master.cleaner.snapshot.interval` [!toc]
+
+**Description:** Snapshot Cleanup chore interval in milliseconds. The cleanup thread keeps running at this interval to find all snapshots that are expired based on TTL and delete them.
+**Default:** `1800000`
+
+#### `hbase.master.snapshot.ttl` [!toc]
+
+**Description:** Default Snapshot TTL to be considered when the user does not specify a TTL while creating a snapshot. The default value of 0 indicates FOREVER: the snapshot should not be automatically deleted until it is manually deleted.
+**Default:** `0`
+
+#### `hbase.master.regions.recovery.check.interval` [!toc]
+
+**Description:** Regions Recovery Chore interval in milliseconds. This chore keeps running at this interval to find all regions with configurable max store file ref count and reopens them.
+**Default:** `1200000`
+
+#### `hbase.regions.recovery.store.file.ref.count` [!toc]
+
+**Description:** A very large ref count on a compacted store file indicates a reference leak on that object (compacted store file). Such files cannot be removed after they are invalidated via compaction. The only way to recover in such a scenario is to reopen the region, which can release all resources, like the refcount, leases, etc. This config represents the store file ref count threshold considered for reopening regions. Any region with compacted store files whose ref count exceeds this value would be eligible for reopening by the master. Here, we take the max refCount among all refCounts on all compacted away store files that belong to a particular region. The default value of -1 indicates this feature is turned off. Only a positive integer value should be provided to enable this feature.
+**Default:** `-1`
+
+#### `hbase.regionserver.slowlog.ringbuffer.size` [!toc]
+
+**Description:** Default size of the ring buffer to be maintained by each RegionServer in order to store online slowlog responses. This is an in-memory ring buffer of requests that were judged to be too slow, in addition to the responseTooSlow logging. The in-memory representation would be complete. For more details, please see the doc section "Get Slow Response Log from shell".
+**Default:** `256`
+
+#### `hbase.regionserver.slowlog.buffer.enabled` [!toc]
+
+**Description:** Indicates whether RegionServers have a ring buffer running for storing Online Slow logs in FIFO manner with limited entries. The size of the ring buffer is indicated by the config hbase.regionserver.slowlog.ringbuffer.size. The default value is false; turn this on to get the latest slowlog responses with complete data.
+**Default:** `false`
+
+#### `hbase.regionserver.slowlog.systable.enabled` [!toc]
+
+**Description:** Should be enabled only if hbase.regionserver.slowlog.buffer.enabled is enabled. If enabled (true), all slow/large RPC logs would be persisted to the system table hbase:slowlog (in addition to the in-memory ring buffer at each RegionServer). The records are stored in increasing order of time. Operators can scan the table with various combinations of ColumnValueFilter. More details are provided in the doc section: "Get Slow/Large Response Logs from System table hbase:slowlog"
+**Default:** `false`
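+
+Putting the three slow-log settings together, a sketch that enables the in-memory ring buffer, enlarges it, and also persists entries to hbase:slowlog could look like this (the buffer size is illustrative):
+
+```xml
+<!-- Illustrative slow-log setup: ring buffer plus hbase:slowlog persistence. -->
+<property>
+  <name>hbase.regionserver.slowlog.buffer.enabled</name>
+  <value>true</value>
+</property>
+<property>
+  <name>hbase.regionserver.slowlog.ringbuffer.size</name>
+  <value>1024</value>
+</property>
+<property>
+  <name>hbase.regionserver.slowlog.systable.enabled</name>
+  <value>true</value>
+</property>
+```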
+
+#### `hbase.master.metafixer.max.merge.count` [!toc]
+
+**Description:** Maximum number of regions to merge at a time when we fix overlaps noted in the CatalogJanitor (CJ) consistency report; this avoids merging 100 regions in one go.
+**Default:** `64`
+
+#### `hbase.rpc.rows.size.threshold.reject` [!toc]
+
+**Description:** If the value is true, the RegionServer will abort batch Put/Delete requests whose number of rows exceeds the threshold defined by the config hbase.rpc.rows.warning.threshold. The default value is false, and hence, by default, only a warning is logged. This config should be turned on to prevent the RegionServer from serving very large batches of rows; this way we can improve CPU usage by discarding overly large batch requests.
+**Default:** `false`
+
+#### `hbase.namedqueue.provider.classes` [!toc]
+
+**Description:** Default values for NamedQueueService implementors. These comma-separated fully qualified class names represent all implementors of NamedQueueService that we would like to be invoked by the LogEvent handler service. One example of a NamedQueue service is SlowLogQueueService, which is used to store slow/large RPC logs in a ring buffer at each RegionServer. All implementors of NamedQueueService should be found under the package "org.apache.hadoop.hbase.namequeues.impl".
+**Default:** `org.apache.hadoop.hbase.namequeues.impl.SlowLogQueueService,org.apache.hadoop.hbase.namequeues.impl.BalancerDecisionQueueService,org.apache.hadoop.hbase.namequeues.impl.BalancerRejectionQueueService,org.apache.hadoop.hbase.namequeues.WALEventTrackerQueueService`
+
+#### `hbase.master.balancer.decision.buffer.enabled` [!toc]
+
+**Description:** Indicates whether active HMaster has ring buffer running for storing balancer decisions in FIFO manner with limited entries. The size of the ring buffer is indicated by config: hbase.master.balancer.decision.queue.size
+**Default:** `false`
+
+#### `hbase.master.balancer.rejection.buffer.enabled` [!toc]
+
+**Description:** Indicates whether active HMaster has ring buffer running for storing balancer rejection in FIFO manner with limited entries. The size of the ring buffer is indicated by config: hbase.master.balancer.rejection.queue.size
+**Default:** `false`
+
+#### `hbase.locality.inputstream.derive.enabled` [!toc]
+
+**Description:** If true, derive StoreFile locality metrics from the underlying DFSInputStream backing reads for that StoreFile. This value will update as the DFSInputStream's block locations are updated over time. Otherwise, locality is computed on StoreFile open, and cached until the StoreFile is closed.
+**Default:** `false`
+
+#### `hbase.locality.inputstream.derive.cache.period` [!toc]
+
+**Description:** If deriving StoreFile locality metrics from the underlying DFSInputStream, how long the derived values should be cached for. The derivation process may involve hitting the namenode, if the DFSInputStream's block list is incomplete.
+**Default:** `60000`
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/hbase-run-models.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/hbase-run-models.mdx
new file mode 100644
index 000000000000..92c4d3244542
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/hbase-run-models.mdx
@@ -0,0 +1,125 @@
+---
+title: "HBase run modes: Standalone and Distributed"
+description: "Understanding HBase deployment modes: standalone mode for development and distributed mode for production clusters."
+---
+
+HBase has two run modes: [standalone](/docs/configuration/hbase-run-models#standalone-hbase) and [distributed](/docs/configuration/hbase-run-models#distributed). Out of the box, HBase runs in standalone mode. Whatever your mode, you will need to configure HBase by editing files in the HBase _conf_ directory. At a minimum, you must edit `conf/hbase-env.sh` to tell HBase which java to use. In this file you set HBase environment variables such as the heapsize and other options for the `JVM`, the preferred location for log files, etc. Set `JAVA_HOME` to point at the root of your java install.
+
+## Standalone HBase
+
+This is the default mode. Standalone mode is what is described in the [quickstart](/docs/getting-started#quick-start---standalone-hbase) section. In standalone mode, HBase does not use HDFS — it uses the local filesystem instead — and it runs all HBase daemons and a local ZooKeeper all up in the same JVM. ZooKeeper binds to a well-known port so clients may talk to HBase.
+
+### Standalone HBase over HDFS
+
+A sometimes useful variation on standalone HBase has all daemons running inside one JVM, but rather than persisting to the local filesystem, they persist to an HDFS instance.
+
+You might consider this profile when you want a simple deploy profile, the load is light, but the data must persist across node comings and goings. Writing to HDFS, where data is replicated, ensures the latter.
+
+To configure this standalone variant, edit your _hbase-site.xml_, setting _hbase.rootdir_ to point at a directory in your HDFS instance, but then set _hbase.cluster.distributed_ to _false_. For example:
+
+```xml
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://namenode.example.org:9000/hbase</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>false</value>
+  </property>
+</configuration>
+```
+
+## Distributed
+
+Distributed mode can be subdivided into distributed but all daemons run on a single node — a.k.a. _pseudo-distributed_ — and _fully-distributed_ where the daemons are spread across all nodes in the cluster. The _pseudo-distributed_ vs. _fully-distributed_ nomenclature comes from Hadoop.
+
+Pseudo-distributed mode can run against the local filesystem or it can run against an instance of the _Hadoop Distributed File System_ (HDFS). Fully-distributed mode can ONLY run on HDFS. See the Hadoop [documentation](https://hadoop.apache.org/docs/current/) for how to set up HDFS. A good walk-through for setting up HDFS on Hadoop 2 can be found at [https://web.archive.org/web/20221007121526/https://www.alexjf.net/blog/distributed-systems/hadoop-yarn-installation-definitive-guide/](https://web.archive.org/web/20221007121526/https://www.alexjf.net/blog/distributed-systems/hadoop-yarn-installation-definitive-guide/).
+
+### Pseudo-distributed
+
+> **Note:** A quickstart has been added to the [quickstart](/docs/getting-started#quick-start---standalone-hbase) chapter. See [quickstart-pseudo](/docs/getting-started#pseudo-distributed-for-local-testing). Some of the information that was originally in this section has been moved there.
+
+A pseudo-distributed mode is simply a fully-distributed mode run on a single host. Use this HBase configuration for testing and prototyping purposes only. Do not use this configuration for production or for performance evaluation.
+
+## Fully-distributed
+
+By default, HBase runs in stand-alone mode. Both stand-alone mode and pseudo-distributed mode are provided for the purposes of small-scale testing. For a production environment, distributed mode is advised. In distributed mode, multiple instances of HBase daemons run on multiple servers in the cluster.
+
+Just as in pseudo-distributed mode, a fully distributed configuration requires that you set the `hbase.cluster.distributed` property to `true`. Typically, the `hbase.rootdir` is configured to point to a highly-available HDFS filesystem.
+
+In addition, the cluster is configured so that multiple cluster nodes enlist as RegionServers, ZooKeeper QuorumPeers, and backup HMaster servers. These configuration basics are all demonstrated in [quickstart-fully-distributed](/docs/getting-started#fully-distributed-for-production).
+
+### Distributed RegionServers
+
+Typically, your cluster will contain multiple RegionServers all running on different servers, as well as primary and backup Master and ZooKeeper daemons. The _conf/regionservers_ file on the master server contains a list of hosts whose RegionServers are associated with this cluster. Each host is on a separate line. All hosts listed in this file will have their RegionServer processes started and stopped when the master server starts or stops.
+
+### ZooKeeper and HBase
+
+See the [ZooKeeper](/docs/zookeeper) section for ZooKeeper setup instructions for HBase.
+
+### Example: Distributed HBase Cluster
+
+This is a bare-bones _conf/hbase-site.xml_ for a distributed HBase cluster. A cluster that is used for real-world work would contain more custom configuration parameters. Most HBase configuration directives have default values, which are used unless the value is overridden in the _hbase-site.xml_. See "[Configuration Files](/docs/configuration/default)" for more information.
+
+```xml
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://namenode.example.org:9000/hbase</value>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>node-a.example.com,node-b.example.com,node-c.example.com</value>
+  </property>
+</configuration>
+```
+
+This is an example _conf/regionservers_ file, which contains a list of nodes that should run a RegionServer in the cluster. These nodes need HBase installed and they need to use the same contents of the _conf/_ directory as the Master server.
+
+```text
+node-a.example.com
+node-b.example.com
+node-c.example.com
+```
+
+This is an example _conf/backup-masters_ file, which contains a list of each node that should run a backup Master instance. The backup Master instances will sit idle unless the main Master becomes unavailable.
+
+```text
+node-b.example.com
+node-c.example.com
+```
+
+### Distributed HBase Quickstart
+
+See [quickstart-fully-distributed](/docs/getting-started#fully-distributed-for-production) for a walk-through of a simple three-node cluster configuration with multiple ZooKeeper, backup HMaster, and RegionServer instances.
+
+### Procedure: HDFS Client Configuration
+
+1. If you have made HDFS client configuration changes on your Hadoop cluster, such as configuration directives for HDFS clients (as opposed to server-side configurations), you must use one of the following methods to enable HBase to see and use these configuration changes:
+ - Add a pointer to your `HADOOP_CONF_DIR` to the `HBASE_CLASSPATH` environment variable in _hbase-env.sh_.
+ - Add a copy of _hdfs-site.xml_ (or _hadoop-site.xml_), or better, symlinks to them, under `$HBASE_HOME/conf`.
+ - If only a small set of HDFS client configurations is involved, add them to _hbase-site.xml_.
+
+An example of such an HDFS client configuration is `dfs.replication`. If, for example, you want to run with a replication factor of 5, HBase will create files with the default replication factor of 3 unless you use one of the methods above to make the configuration available to HBase.
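+
+For example, if you go with the third option, the `dfs.replication` override added to _hbase-site.xml_ might look like the following (the value of 5 matches the example above):
+
+```xml
+<property>
+  <name>dfs.replication</name>
+  <value>5</value>
+</property>
+```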
+
+## Choosing between the Classic Package and the BYO Hadoop Package
+
+Starting with HBase 3.0, HBase includes two binary packages. The classic package includes both the HBase and Hadoop components, while the Hadoop-less "Bring Your Own Hadoop" package omits the Hadoop components and uses the files from an existing Hadoop installation. The classic binary package is named `hbase-VERSION-bin.tar.gz` (e.g. _hbase-3.0.0-bin.tar.gz_), while the Hadoop-less package is named `hbase-byo-hadoop-VERSION-bin.tar.gz` (e.g. _hbase-byo-hadoop-3.0.0-bin.tar.gz_).
+
+If the cluster nodes already have Hadoop installed, you can use the Hadoop-less package. In this case you need to make sure that the `HADOOP_HOME` environment variable is set and points to the Hadoop installation. The easiest way to ensure this is to set it in _hbase-env.sh_. You still need to make sure that the Hadoop configuration files are present on the HBase classpath, as described above.
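+
+For example, you might add something like the following to _hbase-env.sh_ (the Hadoop paths are illustrative):
+
+```bash
+# Point the Hadoop-less HBase package at an existing Hadoop installation.
+export HADOOP_HOME=/opt/hadoop
+
+# Also put the Hadoop client configuration on the HBase classpath, as described above.
+export HBASE_CLASSPATH=$HADOOP_HOME/etc/hadoop
+```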
+
+### Advantages of the BYO Hadoop Package
+
+- There is no need to replace the Hadoop libraries.
+- It is easier to upgrade Hadoop and HBase independently (as long as compatible versions are used).
+- Both the package and installed size are about 100 MB smaller.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/important.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/important.mdx
new file mode 100644
index 000000000000..0c72c4bf8850
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/important.mdx
@@ -0,0 +1,268 @@
+---
+title: "The Important Configurations"
+description: "Critical HBase configurations divided into required settings and recommended optimizations for production clusters."
+---
+
+## Required Configurations
+
+Review the [os](/docs/configuration/basic-prerequisites#operating-system-utilities) and [hadoop](/docs/configuration/basic-prerequisites#hadoop) sections.
+
+### Big Cluster Configurations
+
+If you have a cluster with a lot of regions, it is possible that one RegionServer checks in shortly after the Master starts while all the remaining RegionServers lag behind. This first server to check in will be assigned all regions, which is not optimal. To prevent this scenario from happening, raise the `hbase.master.wait.on.regionservers.mintostart` property from its default value of 1. See [HBASE-6389 Modify the conditions to ensure that Master waits for sufficient number of Region Servers before starting region assignments](https://issues.apache.org/jira/browse/HBASE-6389) for more detail.
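+
+For example, to make the Master wait for at least three RegionServers to check in before it starts assigning regions, you could add something like the following to _hbase-site.xml_ (the value of 3 is illustrative):
+
+```xml
+<property>
+  <name>hbase.master.wait.on.regionservers.mintostart</name>
+  <value>3</value>
+</property>
+```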
+
+## Recommended Configurations
+
+### ZooKeeper Configuration
+
+#### `zookeeper.session.timeout`
+
+The default timeout is 90 seconds (specified in milliseconds). This means that if a server crashes, it will be 90 seconds before the Master notices the crash and starts recovery. You might need to tune the timeout down to a minute or even less so the Master notices failures sooner. Before changing this value, be sure you have your JVM garbage collection configuration under control, otherwise, a long garbage collection that lasts beyond the ZooKeeper session timeout will take out your RegionServer. (You might be fine with this — you probably want recovery to start on the server if a RegionServer has been in GC for a long period of time).
+
+To change this configuration, edit _hbase-site.xml_, copy the changed file across the cluster and restart.
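+
+For example, to lower the timeout to one minute, you could add something like the following to _hbase-site.xml_ (60000 is in milliseconds; the value is illustrative):
+
+```xml
+<property>
+  <name>zookeeper.session.timeout</name>
+  <value>60000</value>
+</property>
+```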
+
+We set this value high to save ourselves having to field questions on the mailing lists asking why a RegionServer went down during a massive import. The usual cause is an untuned JVM running into long GC pauses. Our thinking is that while users are getting familiar with HBase, we'd save them having to know all of its intricacies. Later, once they have built some confidence, they can play with configurations such as this one.
+
+#### Number of ZooKeeper Instances
+
+See [zookeeper](/docs/zookeeper).
+
+### HDFS Configurations
+
+#### `dfs.datanode.failed.volumes.tolerated`
+
+This is the "...number of volumes that are allowed to fail before a DataNode stops offering service. By default, any volume failure will cause a datanode to shutdown" from the _hdfs-default.xml_ description. You might want to set this to about half the number of your available disks.
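+
+For example, on DataNodes with 12 data disks you might allow up to half of them to fail by setting something like the following in _hdfs-site.xml_ (the value is illustrative):
+
+```xml
+<property>
+  <name>dfs.datanode.failed.volumes.tolerated</name>
+  <value>6</value>
+</property>
+```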
+
+#### `hbase.regionserver.handler.count` [#configuration-important-hbaseregionserverhandlercount]
+
+This setting defines the number of threads that are kept open to answer incoming requests to user tables. The rule of thumb is to keep this number low when the payload per request approaches the MB (big puts, scans using a large cache) and high when the payload is small (gets, small puts, ICVs, deletes). The total size of the queries in progress is limited by the setting `hbase.ipc.server.max.callqueue.size`.
+
+It is safe to set that number to the maximum number of incoming clients if their payload is small, the typical example being a cluster that serves a website since puts aren't typically buffered and most of the operations are gets.
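+
+For example, such a get-heavy cluster might raise the handler count in _hbase-site.xml_ along these lines (the value of 100 is illustrative):
+
+```xml
+<property>
+  <name>hbase.regionserver.handler.count</name>
+  <value>100</value>
+</property>
+```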
+
+The reason why it is dangerous to keep this setting high is that the aggregate size of all the puts that are currently happening in a region server may impose too much pressure on its memory, or even trigger an OutOfMemoryError. A RegionServer running on low memory will trigger its JVM's garbage collector to run more frequently up to a point where GC pauses become noticeable (the reason being that all the memory used to keep all the requests' payloads cannot be trashed, no matter how hard the garbage collector tries). After some time, the overall cluster throughput is affected since every request that hits that RegionServer will take longer, which exacerbates the problem even more.
+
+You can get a sense of whether you have too little or too many handlers by [rpc.logging](/docs/troubleshooting#enabling-rpc-level-logging) on an individual RegionServer then tailing its logs (Queued requests consume memory).
+
+### Configuration for large memory machines
+
+HBase ships with a reasonable, conservative configuration that will work on nearly all machine types that people might want to test with. If you have larger machines — for example, HBase with an 8G or larger heap — you might find the following configuration options helpful. TODO.
+
+### Compression [#configuration-important-recommended-configurations-compression]
+
+You should consider enabling ColumnFamily compression. There are several options that are near-frictionless and in almost all cases boost performance by reducing the size of StoreFiles and thus reducing I/O.
+
+See [compression](/docs/compression) for more information.
+
+### Configuring the size and number of WAL files
+
+HBase uses the [WAL](/docs/architecture/regionserver#write-ahead-log-wal) to recover the memstore data that has not been flushed to disk in case of a RegionServer failure. These WAL files should be configured to be slightly smaller than the HDFS block size (by default an HDFS block is 64 MB and a WAL file is ~60 MB).
+
+HBase also has a limit on the number of WAL files, designed to ensure there's never too much data that needs to be replayed during recovery. This limit needs to be set according to the memstore configuration, so that all the necessary data fits. It is recommended to allocate enough WAL files to store at least that much data (when all memstores are close to full). For example, with a 16 GB RegionServer heap, default memstore settings (0.4), and default WAL file size (~60 MB), 16 GB \* 0.4 / 60 MB gives a starting point for the WAL file count of ~109. However, as all memstores are not expected to be full all the time, fewer WAL files can be allocated.
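+
+As a sketch, assuming you cap the WAL count with the `hbase.regionserver.maxlogs` property, the calculation above would translate into something like the following (size the value from your own heap and memstore settings):
+
+```xml
+<property>
+  <name>hbase.regionserver.maxlogs</name>
+  <value>109</value>
+</property>
+```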
+
+### Managed Splitting
+
+HBase generally handles splitting of your regions based upon the settings in your _hbase-default.xml_ and _hbase-site.xml_ configuration files. Important settings include `hbase.regionserver.region.split.policy`, `hbase.hregion.max.filesize`, `hbase.regionserver.regionSplitLimit`. A simplistic view of splitting is that when a region grows to `hbase.hregion.max.filesize`, it is split. For most usage patterns, you should use automatic splitting. See [manual region splitting decisions](/docs/architecture/regions#manual-region-splitting) for more information about manual region splitting.
+
+Instead of allowing HBase to split your regions automatically, you can choose to manage the splitting yourself. Manually managing splits works if you know your keyspace well; otherwise, let HBase figure out where to split for you. Manual splitting can mitigate region creation and movement under load. It also makes region boundaries known and invariant (if you disable region splitting). If you use manual splits, it is easier to do staggered, time-based major compactions to spread out your network IO load.
+
+#### Disable Automatic Splitting
+
+To disable automatic splitting, set the region split policy, in either the cluster configuration or the table configuration, to `org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy`.
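+
+For the cluster-wide variant, that could look like the following in _hbase-site.xml_:
+
+```xml
+<property>
+  <name>hbase.regionserver.region.split.policy</name>
+  <value>org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy</value>
+</property>
+```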
+
+<Callout>
+  If you disable automatic splits to diagnose a problem or during a period of fast data growth, it
+  is recommended to re-enable them when your situation becomes more stable. The potential benefits
+  of managing region splits yourself are not undisputed.
+</Callout>
+
+#### Determine the Optimal Number of Pre-Split Regions
+
+The optimal number of pre-split regions depends on your application and environment. A good rule of thumb is to start with 10 pre-split regions per server and watch as data grows over time. It is better to err on the side of too few regions and perform rolling splits later. The optimal number of regions depends upon the largest StoreFile in your region. The size of the largest StoreFile will increase with time if the amount of data grows. The goal is for the largest region to be just large enough that the compaction selection algorithm only compacts it during a timed major compaction. Otherwise, the cluster can be prone to compaction storms with a large number of regions under compaction at the same time. It is important to understand that the data growth causes compaction storms and not the manual split decision.
+
+If the regions are split into too many large regions, you can increase the major compaction interval by configuring `HConstants.MAJOR_COMPACTION_PERIOD`. The `org.apache.hadoop.hbase.util.RegionSplitter` utility also provides a network-IO-safe rolling split of all regions.
+
+### Managed Compactions
+
+By default, major compactions are scheduled to run once in a 7-day period.
+
+If you need to control exactly when and how often major compaction runs, you can disable managed major compactions. See the entry for `hbase.hregion.majorcompaction` in the [compaction.parameters](/docs/architecture/regions#parameters-used-by-compaction-algorithm) table for details.
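+
+For example, setting the period to 0 in _hbase-site.xml_ turns off time-based major compactions so that you can schedule them yourself (see the note below before doing this):
+
+```xml
+<property>
+  <name>hbase.hregion.majorcompaction</name>
+  <value>0</value>
+</property>
+```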
+
+<Callout type="warn">
+  Major compactions are absolutely necessary for StoreFile clean-up. Do not disable them altogether.
+  You can run major compactions manually via the HBase shell or via the [Admin
+  API](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Admin.html#majorCompact(org.apache.hadoop.hbase.TableName)).
+</Callout>
+
+For more information about compactions and the compaction file selection process, see [compaction](/docs/architecture/regions#compaction).
+
+### Speculative Execution [#configuration-important-recommended-configurations-speculative-execution]
+
+Speculative Execution of MapReduce tasks is on by default, and for HBase clusters it is generally advised to turn off Speculative Execution at a system-level unless you need it for a specific case, where it can be configured per-job. Set the properties `mapreduce.map.speculative` and `mapreduce.reduce.speculative` to false.
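+
+For example, in the configuration used by your MapReduce jobs (typically _mapred-site.xml_):
+
+```xml
+<property>
+  <name>mapreduce.map.speculative</name>
+  <value>false</value>
+</property>
+<property>
+  <name>mapreduce.reduce.speculative</name>
+  <value>false</value>
+</property>
+```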
+
+## Other Configurations
+
+### Balancer
+
+The balancer is a periodic operation which is run on the master to redistribute regions on the cluster. It is configured via `hbase.balancer.period` and defaults to 300000 (5 minutes).
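+
+For example, to run the balancer every 10 minutes instead, you could set the following (the value is in milliseconds and is illustrative):
+
+```xml
+<property>
+  <name>hbase.balancer.period</name>
+  <value>600000</value>
+</property>
+```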
+
+See [master.processes.loadbalancer](/docs/architecture/master#loadbalancer) for more information on the LoadBalancer.
+
+### Disabling Blockcache
+
+Do not turn off the block cache (you would do so by setting `hfile.block.cache.size` to zero). Currently, we do not do well if you do this because the RegionServer will spend all its time loading HFile indices over and over again. If your working set is such that the block cache does you no good, at least size the block cache such that HFile indices will stay up in the cache (you can get a rough idea of the size you need by surveying RegionServer UIs; you'll see index block size accounted for near the top of the webpage).
+
+### Nagle's or the small package problem
+
+If an occasional delay of around 40 ms is seen in operations against HBase, try the [Nagle's algorithm](https://en.wikipedia.org/wiki/Nagle%27s_algorithm) settings. For example, see the user mailing list thread, [Inconsistent scan performance with caching set to 1](https://lists.apache.org/thread.html/3d7ceb41c04a955b1b1c80480cdba95208ca3e97bf6895a40e0c1bbb%401346186127%40%3Cuser.hbase.apache.org%3E), and the issue cited therein where setting `notcpdelay` improved scan speeds. You might also see the graphs at the tail of [HBASE-7008 Set scanner caching to a better default](https://issues.apache.org/jira/browse/HBASE-7008), where our Lars Hofhansl tries various data sizes with Nagle's on and off, measuring the effect.
+
+### Better Mean Time to Recover (MTTR)
+
+This section is about configurations that will make servers come back faster after a failure. See the Devaraj Das and Nicolas Liochon blog post [Introduction to HBase Mean Time to Recover (MTTR)](http://hortonworks.com/blog/introduction-to-hbase-mean-time-to-recover-mttr/) for a brief introduction.
+
+The issue [HBASE-8354 forces Namenode into loop with lease recovery requests](https://issues.apache.org/jira/browse/HBASE-8389) is messy but has a bunch of good discussion toward the end on low timeouts and how to cause faster recovery, including citation of fixes added to HDFS. Read the Varun Sharma comments. The configurations suggested below are Varun's suggestions distilled and tested. Make sure you are running on a late-version HDFS so you have the fixes he refers to and that he himself added to HDFS to help HBase MTTR (e.g. HDFS-3703, HDFS-3712, and HDFS-4791 — Hadoop 2 for sure has them and late Hadoop 1 has some). Set the following in the RegionServer.
+
+```xml
+<property>
+  <name>hbase.lease.recovery.dfs.timeout</name>
+  <value>23000</value>
+  <description>How much time we allow elapse between calls to recover lease.
+  Should be larger than the dfs timeout.</description>
+</property>
+<property>
+  <name>dfs.client.socket-timeout</name>
+  <value>10000</value>
+  <description>Down the DFS timeout from 60 to 10 seconds.</description>
+</property>
+```
+
+And on the NameNode/DataNode side, set the following to enable 'staleness' introduced in HDFS-3703, HDFS-3912.
+
+```xml
+<property>
+  <name>dfs.client.socket-timeout</name>
+  <value>10000</value>
+  <description>Down the DFS timeout from 60 to 10 seconds.</description>
+</property>
+<property>
+  <name>dfs.datanode.socket.write.timeout</name>
+  <value>10000</value>
+  <description>Down the DFS timeout from 8 * 60 to 10 seconds.</description>
+</property>
+<property>
+  <name>ipc.client.connect.timeout</name>
+  <value>3000</value>
+  <description>Down from 60 seconds to 3.</description>
+</property>
+<property>
+  <name>ipc.client.connect.max.retries.on.timeouts</name>
+  <value>2</value>
+  <description>Down from 45 seconds to 3 (2 == 3 retries).</description>
+</property>
+<property>
+  <name>dfs.namenode.avoid.read.stale.datanode</name>
+  <value>true</value>
+  <description>Enable stale state in hdfs</description>
+</property>
+<property>
+  <name>dfs.namenode.stale.datanode.interval</name>
+  <value>20000</value>
+  <description>Down from default 30 seconds</description>
+</property>
+<property>
+  <name>dfs.namenode.avoid.write.stale.datanode</name>
+  <value>true</value>
+  <description>Enable stale state in hdfs</description>
+</property>
+```
+
+### JMX
+
+JMX (Java Management Extensions) provides built-in instrumentation that enables you to monitor and manage the Java VM. To enable monitoring and management from remote systems, you need to set the system property `com.sun.management.jmxremote.port` (the port number through which you want to enable JMX RMI connections) when you start the Java VM. See the [official documentation](http://docs.oracle.com/javase/8/docs/technotes/guides/management/agent.html) for more information. Historically, besides the port mentioned above, JMX opens two additional random TCP listening ports, which can lead to port conflicts. (See [HBASE-10289](https://issues.apache.org/jira/browse/HBASE-10289) for details.)
+
+As an alternative, you can use the coprocessor-based JMX implementation provided by HBase. To enable it, add the following property to _hbase-site.xml_:
+
+```xml
+<property>
+  <name>hbase.coprocessor.regionserver.classes</name>
+  <value>org.apache.hadoop.hbase.JMXListener</value>
+</property>
+```
+
+<Callout type="warn">
+  DO NOT set `com.sun.management.jmxremote.port` for the Java VM at the same time.
+</Callout>
+
+Currently it supports the Master and RegionServer Java VMs. By default, JMX listens on TCP port 10102; you can further configure the port using the following properties:
+
+```xml
+<property>
+  <name>regionserver.rmi.registry.port</name>
+  <value>61130</value>
+</property>
+<property>
+  <name>regionserver.rmi.connector.port</name>
+  <value>61140</value>
+</property>
+```
+
+The registry port can be shared with the connector port in most cases, so you only need to configure `regionserver.rmi.registry.port`. However, if you want to use SSL communication, the two ports must be configured to different values.
+
+By default, password authentication and SSL communication are disabled. To enable password authentication, update _hbase-env.sh_ as shown below:
+
+```bash
+export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.authenticate=true \
+ -Dcom.sun.management.jmxremote.password.file=your_password_file \
+ -Dcom.sun.management.jmxremote.access.file=your_access_file"
+
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE "
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE "
+```
+
+See the example password and access files under `$JRE_HOME/lib/management`.
+
+To enable SSL communication with password authentication, follow the steps below:
+
+```bash
+#1. generate a key pair, stored in myKeyStore
+keytool -genkey -alias jconsole -keystore myKeyStore
+
+#2. export it to file jconsole.cert
+keytool -export -alias jconsole -keystore myKeyStore -file jconsole.cert
+
+#3. copy jconsole.cert to jconsole client machine, import it to jconsoleKeyStore
+keytool -import -alias jconsole -keystore jconsoleKeyStore -file jconsole.cert
+```
+
+And then update _hbase-env.sh_ like below:
+
+```bash
+export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=true \
+ -Djavax.net.ssl.keyStore=/home/tianq/myKeyStore \
+ -Djavax.net.ssl.keyStorePassword=your_password_in_step_1 \
+ -Dcom.sun.management.jmxremote.authenticate=true \
+ -Dcom.sun.management.jmxremote.password.file=your_password_file \
+ -Dcom.sun.management.jmxremote.access.file=your_access_file"
+
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE "
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE "
+```
+
+Finally start `jconsole` on the client using the key store:
+
+```bash
+jconsole -J-Djavax.net.ssl.trustStore=/home/tianq/jconsoleKeyStore
+```
+
+<Callout>
+  To enable the HBase JMX implementation on the Master, you also need to add the following property
+  in *hbase-site.xml*:
+</Callout>
+
+```xml
+<property>
+  <name>hbase.coprocessor.master.classes</name>
+  <value>org.apache.hadoop.hbase.JMXListener</value>
+</property>
+```
+
+The corresponding properties for port configuration are `master.rmi.registry.port` (by default 10101) and `master.rmi.connector.port` (by default the same as the registry port).
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/index.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/index.mdx
new file mode 100644
index 000000000000..d753382f09cf
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/index.mdx
@@ -0,0 +1,47 @@
+---
+title: "Configuration"
+description: "Complete guide to HBase configuration including all settings, tuning parameters, and best practices."
+---
+
+This chapter expands upon the [Getting Started](/docs/getting-started) chapter to further explain configuration of Apache HBase. Please read this chapter carefully, especially the [Basic Prerequisites](basic-prerequisites) to ensure that your HBase testing and deployment goes smoothly. Familiarize yourself with [Support and Testing Expectations](/docs#support-and-testing-expectations) as well.
+
+## Configuration Files
+
+Apache HBase uses the same configuration system as Apache Hadoop. All configuration files are located in the _conf/_ directory, which needs to be kept in sync for each node on your cluster.
+
+**_backup-masters_**
+Not present by default. A plain-text file which lists hosts on which the Master should start a backup Master process, one host per line.
+
+**_hadoop-metrics2-hbase.properties_**
+Used to connect HBase to Hadoop's Metrics2 framework. See the [Hadoop Wiki entry](https://cwiki.apache.org/confluence/display/HADOOP2/HADOOP-6728-MetricsV2) for more information on Metrics2. Contains only commented-out examples by default.
+
+**_hbase-env.cmd_ and _hbase-env.sh_**
+Script for Windows and Linux / Unix environments to set up the working environment for HBase, including the location of Java, Java options, and other environment variables. The file contains many commented-out examples to provide guidance.
+
+**_hbase-policy.xml_**
+The default policy configuration file used by RPC servers to make authorization decisions on client requests. Only used if HBase [security](/docs/security) is enabled.
+
+**_hbase-site.xml_**
+The main HBase configuration file. This file specifies configuration options which override HBase's default configuration. You can view (but do not edit) the default configuration file at _hbase-common/src/main/resources/hbase-default.xml_. You can also view the entire effective configuration for your cluster (defaults and overrides) in the **HBase Configuration** tab of the HBase Web UI.
+
+**_log4j2.properties_**
+Configuration file for HBase logging via `log4j2`.
+
+**_regionservers_**
+A plain-text file containing a list of hosts which should run a RegionServer in your HBase cluster. By default, this file contains the single entry `localhost`. It should contain a list of hostnames or IP addresses, one per line, and should only contain `localhost` if each node in your cluster will run a RegionServer on its `localhost` interface.
+
+<Callout>
+  When you edit XML, it is a good idea to use an XML-aware editor to be sure that your syntax is
+  correct and your XML is well-formed. You can also use the `xmllint` utility to check that your XML
+  is well-formed. By default, `xmllint` re-flows and prints the XML to standard output. To check for
+  well-formedness and only print output if errors exist, use the command `xmllint -noout filename.xml`.
+</Callout>
+
+<Callout>
+  When running in distributed mode, after you make an edit to an HBase configuration, make sure you
+  copy the contents of the *conf/* directory to all nodes of the cluster. HBase will not do this for
+  you. Use a configuration management tool for managing and copying the configuration files to your
+  nodes. For most configurations, a restart is needed for servers to pick up changes. Dynamic
+  configuration is an exception to this, to be described later.
+</Callout>
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/meta.json b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/meta.json
new file mode 100644
index 000000000000..a9bacf3b7c7e
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/configuration/meta.json
@@ -0,0 +1,4 @@
+{
+ "title": "Configuration",
+ "pages": ["basic-prerequisites", "hbase-run-models", "confirm", "default", "!hbase-default", "example", "important", "dynamic"]
+}
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/contributing-to-documentation.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/contributing-to-documentation.mdx
new file mode 100644
index 000000000000..b323709afb04
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/contributing-to-documentation.mdx
@@ -0,0 +1,317 @@
+---
+title: "Appendix: Contributing to Documentation"
+description: "Guide for contributing to Apache HBase documentation, including patch submission procedures, website editing, style guidelines, and best practices."
+---
+
+## Contributing to Documentation
+
+The Apache HBase project welcomes contributions to all aspects of the project, including the documentation.
+
+In HBase, documentation includes the following areas, and probably some others:
+
+- The [HBase Reference Guide](/docs) (this book)
+- The [HBase website](/)
+- API documentation
+- Command-line utility output and help text
+- Web UI strings, explicit help text, context-sensitive strings, and others
+- Log messages
+- Comments in source files, configuration files, and others
+- Localization of any of the above into target languages other than English
+
+No matter which area you want to help out with, the first step is almost always to download (typically by cloning the Git repository) and familiarize yourself with the HBase source code. For information on downloading and building the source, see [developer](/docs/building-and-developing).
+
+## Contributing to Documentation or Other Strings
+
+If you spot an error in a string in a UI, utility, script, log message, or elsewhere, or you think something could be made more clear, or you think text needs to be added where it doesn't currently exist, the first step is to file a JIRA. Be sure to set the component to `Documentation` in addition to any other involved components. Most components have one or more default owners, who monitor new issues which come into those queues. Regardless of whether you feel able to fix the bug, you should still file bugs where you see them.
+
+If you want to try your hand at fixing your newly-filed bug, assign it to yourself. You will need to clone the HBase Git repository to your local system and work on the issue there. When you have developed a potential fix, submit it for review. If it addresses the issue and is seen as an improvement, one of the HBase committers will commit it to one or more branches, as appropriate.
+
+### Procedure: Suggested Work flow for Submitting Patches
+
+This procedure goes into more detail than Git pros will need, but is included in this appendix so that people unfamiliar with Git can feel confident contributing to HBase while they learn.
+
+<Steps>
+
+<Step>
+  If you have not already done so, clone the Git repository locally. You only need to do this once.
+</Step>
+
+<Step>
+  Fairly often, pull remote changes into your local repository by using the `git pull` command,
+  while your tracking branch is checked out.
+</Step>
+
+<Step>
+For each issue you work on, create a new branch. One convention that works well for naming the branches is to name a given branch the same as the JIRA it relates to:
+
+```bash
+$ git checkout -b HBASE-123456
+```
+
+</Step>
+
+<Step>
+  Make your suggested changes on your branch, committing your changes to your local repository
+  often. If you need to switch to working on a different issue, remember to check out the
+  appropriate branch.
+</Step>
+
+<Step>
+  When you are ready to submit your patch, first be sure that HBase builds cleanly and behaves as
+  expected in your modified branch.
+</Step>
+
+<Step>
+  If you have made documentation or website changes, verify that the site builds correctly by
+  running the development server from the `hbase-website/` directory.
+</Step>
+
+<Step>
+If it takes you several days or weeks to implement your fix, or you know that the area of the code you are working in has had a lot of changes lately, make sure you rebase your branch against the remote master and take care of any conflicts before submitting your patch.
+
+```bash
+$ git checkout HBASE-123456
+$ git rebase origin/master
+```
+
+</Step>
+
+<Step>
+Generate your patch against the remote master. Run the following command from the top level of your git repository (usually called `hbase`):
+
+```bash
+$ git format-patch --stdout origin/master > HBASE-123456.patch
+```
+
+The name of the patch should contain the JIRA ID.
+</Step>
+
+<Step>
+  Look over the patch file to be sure that you did not change any additional files by accident and
+  that there are no other surprises.
+</Step>
+
+<Step>
+  When you are satisfied, attach the patch to the JIRA and click the **Patch Available** button. A
+  reviewer will review your patch.
+</Step>
+
+<Step>
+  If you need to submit a new version of the patch, leave the old one on the JIRA and add a version
+  number to the name of the new patch.
+</Step>
+
+<Step>
+  After a change has been committed, there is no need to keep your local branch around.
+</Step>
+
+</Steps>
+
+## Editing the HBase Website and Documentation
+
+The HBase website and documentation are now part of a single application built with Remix and Fumadocs. The source files are located in the `hbase-website/` directory:
+
+- **Documentation pages**: `hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/` - individual MDX files for each documentation section
+- **Single-page view**: `hbase-website/app/pages/_docs/docs/_mdx/single-page/index.mdx` - combines all documentation into one page
+- **Website components**: `hbase-website/app/components/` - React components used throughout the site
+- **Images**: `hbase-website/public/` - static assets including images
+
+You can edit MDX files in any text editor or IDE. To preview your changes locally, run the development server from the `hbase-website/` directory and navigate to the documentation pages in your browser. When you are satisfied with your changes, follow the procedure in [submit doc patch procedure](/docs/contributing-to-documentation#procedure-suggested-work-flow-for-submitting-patches) to submit your patch.
+
+## Publishing the HBase Website and Documentation
+
+The HBase website and documentation are built and deployed as a single Remix application. The deployment process is managed through the project's CI/CD pipeline, which builds the site from the `hbase-website/` directory and deploys it automatically when changes are merged to the main branch.
+
+## MDX and Fumadocs Components
+
+The HBase documentation is written in MDX (Markdown with JSX), which allows you to use standard Markdown syntax along with React components. For comprehensive documentation on Markdown formatting and MDX features, refer to:
+
+- [Fumadocs Markdown Documentation](https://www.fumadocs.dev/docs/markdown) - Complete guide to MDX syntax and Fumadocs features
+- [CommonMark specification](https://commonmark.org/) - Standard Markdown syntax reference
+- [GFM (GitHub Flavored Markdown)](https://github.github.com/gfm) - GitHub-style Markdown extensions
+
+### Fumadocs Components
+
+Fumadocs provides several components that enhance the documentation:
+
+#### Steps Component
+
+Use `<Steps>` with nested `<Step>` components to create numbered step-by-step instructions:
+
+```mdx
+<Steps>
+<Step>
+First, do this thing.
+</Step>
+<Step>
+Then, do this other thing.
+</Step>
+</Steps>
+```
+
+**Example output:**
+
+<Steps>
+<Step>
+First, do this thing.
+</Step>
+<Step>
+Then, do this other thing.
+</Step>
+</Steps>
+
+#### Callout Component
+
+Use `<Callout>` for notes, warnings, and important information:
+
+```mdx
+<Callout>This is an informational callout.</Callout>
+
+<Callout type="warn">This is a warning callout.</Callout>
+```
+
+**Example output:**
+
+<Callout>This is an informational callout.</Callout>
+
+<Callout type="warn">This is a warning callout.</Callout>
+
+#### Include Directive
+
+The single-page documentation view uses `<include>` tags to combine multiple MDX files:
+
+```mdx
+<include>../(multi-page)/getting-started.mdx</include>
+```
+
+See `hbase-website/app/pages/_docs/docs/_mdx/single-page/index.mdx` for examples of how all documentation sections are included in the single-page view.
+
+## Auto-Generated Content
+
+Some parts of the HBase documentation, such as the [default configuration](/docs/configuration/default), are generated automatically to stay in sync with the code. The configuration documentation is generated from the `hbase-common/src/main/resources/hbase-default.xml` file.
+
+To add or modify configuration parameters, update the source XML file. To regenerate the documentation from the updated configuration, run:
+
+```bash
+npm run extract-hbase-config
+```
+
+This command is also executed automatically when you run `npm ci`.
+
+## Images in the Documentation
+
+You can include images in the HBase documentation using standard Markdown syntax. Always include descriptive alt text for accessibility:
+
+```markdown
+![A short description of the image](path/to/image.png)
+```
+
+Save images to the `hbase-website/public/` directory or an appropriate subdirectory. Reference them in your MDX files using absolute paths from the public directory:
+
+```markdown
+![A short description of the image](/images/my-image.png)
+```
+
+When submitting a patch that includes images, attach the images to the JIRA issue.
+
+## Adding a New Section to the Documentation
+
+To add a new section to the HBase documentation:
+
+1. Create a new MDX file in `hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/` with a descriptive name (e.g., `my-new-section.mdx`)
+2. Add frontmatter at the top of the file with a title and description:
+
+```mdx
+---
+title: "My New Section"
+description: "Brief description of what this section covers"
+---
+
+## My New Section
+
+Your content here...
+```
+
+3. Add your new file to `hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/meta.json` in the appropriate location within the `pages` array (without the `.mdx` extension):
+
+```json
+{
+ "pages": [
+ "---My Category---",
+ "my-new-section",
+ ...
+ ]
+}
+```
+
+4. Add an `<include>` directive to `hbase-website/app/pages/_docs/docs/_mdx/single-page/index.mdx` in the appropriate location:
+
+```mdx
+<include>../(multi-page)/my-new-section.mdx</include>
+```
+
+5. Add your new file to Git before creating your patch.
+
+## Unique Headings Requirement
+
+Since all documentation files are merged into a single-page view, **all heading IDs must be unique across the entire documentation**. A test will fail during the build if duplicate heading IDs are detected, marking the problematic headings.
+
+Headings don't have to be visually unique, but their link IDs must be unique. You can customize the heading ID using Fumadocs syntax:
+
+```markdown
+## Configuration [#server-configuration]
+```
+
+This creates a heading that displays as "Configuration" but has the unique ID `#server-configuration` for linking purposes.
+
+### Hiding Headings from Table of Contents
+
+You can hide specific headings from the right-side table of contents:
+
+```markdown
+## Internal Implementation Details [!toc]
+```
+
+This heading will still appear in the document but won't show up in the table of contents navigation.
+
+<Callout>
+  Note: `[!toc]` becomes part of the heading ID. For example, `## Usage [!toc]` will have the ID
+  `#usage-toc`.
+</Callout>
+
+### Combining Custom IDs and TOC Hiding
+
+You can combine both attributes:
+
+```markdown
+## Configuration Details [#server-config] [!toc]
+```
+
+## Common Documentation Issues
+
+The following documentation issues come up often:
+
+1. **Isolate Changes for Easy Diff Review**
+
+ Avoid reformatting entire files when making content changes. If you need to reformat a file, do that in a separate JIRA where you do not change any content.
+
+2. **Syntax Highlighting**
+
+ MDX supports syntax highlighting for code blocks. Specify the language after the opening triple backticks:
+
+ ````markdown
+ ```java
+ public class Example {
+ // your code here
+ }
+ ```
+ ````
+
+3. **Component Syntax**
+
+ Remember to properly close Fumadocs components. Components like `<Callout>` and `<Steps>` must be properly closed:
+
+ ```mdx
+ <Callout>Your content here</Callout>
+ ```
+
+4. **Unique Heading IDs**
+
+ Ensure all heading IDs are unique across the entire documentation. If you get a test failure about duplicate headings, customize the heading ID using `[#custom-id]` syntax as described in the [Unique Headings Requirement](#unique-headings-requirement) section.
diff --git a/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/cp.mdx b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/cp.mdx
new file mode 100644
index 000000000000..17f68519655a
--- /dev/null
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/cp.mdx
@@ -0,0 +1,760 @@
+---
+title: "Apache HBase Coprocessors"
+description: "The coprocessor framework provides mechanisms for running your custom code directly on
+the RegionServers managing your data."
+---
+
+HBase Coprocessors are modeled after Google BigTable's coprocessor implementation
+(http://research.google.com/people/jeff/SOCC2010-keynote-slides.pdf pages 41-42.).
+Efforts are ongoing to bridge gaps between HBase's
+implementation and BigTable's architecture. For more information see
+[HBASE-4047](https://issues.apache.org/jira/browse/HBASE-4047).
+
+The information in this chapter is primarily sourced and heavily reused from the following
+resources:
+
+1. Mingjie Lai's blog post
+ [Coprocessor Introduction](https://blogs.apache.org/hbase/entry/coprocessor_introduction).
+2. Gaurav Bhardwaj's blog post
+ [The How To Of HBase Coprocessors](http://www.3pillarglobal.com/insights/hbase-coprocessors).
+
+<Callout type="warn">
+Coprocessors are an advanced feature of HBase and are intended to be used by system
+developers only. Because coprocessor code runs directly on the RegionServer and has
+direct access to your data, they introduce the risk of data corruption, man-in-the-middle
+attacks, or other malicious data access. Currently, there is no mechanism to prevent
+data corruption by coprocessors, though work is underway on
+[HBASE-4047](https://issues.apache.org/jira/browse/HBASE-4047).
+
+In addition, there is no resource isolation, so a well-intentioned but misbehaving
+coprocessor can severely degrade cluster performance and stability.
+</Callout>
+
+## Coprocessor Overview
+
+In HBase, you fetch data using a `Get` or `Scan`, whereas in an RDBMS you use a SQL
+query. In order to fetch only the relevant data, you filter it using an HBase
+[Filter](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/filter/Filter.html)
+, whereas in an RDBMS you use a `WHERE` predicate.
+
+After fetching the data, you perform computations on it. This paradigm works well
+for "small data" with a few thousand rows and several columns. However, when you scale
+to billions of rows and millions of columns, moving large amounts of data across your
+network will create bottlenecks at the network layer, and the client needs to be powerful
+enough and have enough memory to handle the large amounts of data and the computations.
+In addition, the client code can grow large and complex.
+
+In this scenario, coprocessors might make sense. You can put the business computation
+code into a coprocessor which runs on the RegionServer, in the same location as the
+data, and returns the result to the client.
+
+This is only one scenario where using coprocessors can provide benefit. Following
+are some analogies which may help to explain some of the benefits of coprocessors.
+
+### Coprocessor Analogies
+
+**Triggers and Stored Procedure**
+An Observer coprocessor is similar to a trigger in an RDBMS in that it executes
+your code either before or after a specific event (such as a `Get` or `Put`)
+occurs. An endpoint coprocessor is similar to a stored procedure in an RDBMS
+because it allows you to perform custom computations on the data on the
+RegionServer itself, rather than on the client.
+
+**MapReduce**
+MapReduce operates on the principle of moving the computation to the location of
+the data. Coprocessors operate on the same principle.
+
+**AOP**
+If you are familiar with Aspect Oriented Programming (AOP), you can think of a coprocessor
+as applying advice by intercepting a request and then running some custom code,
+before passing the request on to its final destination (or even changing the destination).
+
+### Coprocessor Implementation Overview
+
+1. Your class should implement one of the Coprocessor interfaces -
+ [Coprocessor](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/Coprocessor.html),
+ [RegionObserver](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html),
+ [CoprocessorService](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorService.html) - to name a few.
+2. Load the coprocessor, either statically (from the configuration) or dynamically,
+ using HBase Shell. For more details see [Loading Coprocessors](/docs/cp#loading-coprocessors).
+3. Call the coprocessor from your client-side code. HBase handles the coprocessor
+ transparently.
+
+The framework API is provided in the
+[coprocessor](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html)
+package.
+
+## Types of Coprocessors
+
+### Observer Coprocessors
+
+Observer coprocessors are triggered either before or after a specific event occurs.
+Observers that happen before an event use methods that start with a `pre` prefix,
+such as [`prePut`](). Observers that happen just after an event override methods that start
+with a `post` prefix, such as [`postPut`]().
+
+#### Use Cases for Observer Coprocessors
+
+**Security**
+Before performing a `Get` or `Put` operation, you can check for permission using
+`preGet` or `prePut` methods.
+
+**Referential Integrity**
+HBase does not directly support the RDBMS concept of referential integrity, also known
+as foreign keys. You can use a coprocessor to enforce such integrity. For instance,
+if you have a business rule that every insert to the `users` table must be followed
+by a corresponding entry in the `user_daily_attendance` table, you could implement
+a coprocessor to use the `prePut` method on `user` to insert a record into `user_daily_attendance`.
+
+**Secondary Indexes**
+You can use a coprocessor to maintain secondary indexes. For more information, see
+[SecondaryIndexing](https://cwiki.apache.org/confluence/display/HADOOP2/Hbase+SecondaryIndexing).
+
+#### Types of Observer Coprocessor
+
+**RegionObserver**
+A RegionObserver coprocessor allows you to observe events on a region, such as `Get`
+and `Put` operations. See
+[RegionObserver](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html).
+
+**RegionServerObserver**
+A RegionServerObserver allows you to observe events related to the RegionServer's
+operation, such as starting, stopping, or performing merges, commits, or rollbacks.
+See
+[RegionServerObserver](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.html).
+
+**MasterObserver**
+A MasterObserver allows you to observe events related to the HBase Master, such
+as table creation, deletion, or schema modification. See
+[MasterObserver](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html).
+
+**WalObserver**
+A WalObserver allows you to observe events related to writes to the Write-Ahead
+Log (WAL). See
+[WALObserver](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/WALObserver.html).
+
+[Examples](/docs/cp#cp-examples) provides working examples of observer coprocessors.
+
+### Endpoint Coprocessor
+
+Endpoint processors allow you to perform computation at the location of the data.
+See [Coprocessor Analogy](/docs/cp#coprocessor-analogies). An example is the need to calculate a running
+average or summation for an entire table which spans hundreds of regions.
+
+In contrast to observer coprocessors, where your code is run transparently, endpoint
+coprocessors must be explicitly invoked using the
+[CoprocessorService()]()
+method available in
+[AsyncTable](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html).
+
+<Callout>
+The coprocessorService method in [Table](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html)
+has been deprecated.
+
+In [HBASE-21512](https://issues.apache.org/jira/browse/HBASE-21512)
+we reimplemented the sync client based on the async client. The coprocessorService
+method defined in the `Table` interface directly references a method from protobuf's
+`BlockingInterface`, which means we would need to use a separate thread pool to execute
+the method in order to avoid blocking the async client (we want to avoid blocking calls in
+our async implementation).
+
+Since coprocessor is an advanced feature, we believe it is OK for coprocessor users to
+instead switch over to use `AsyncTable`. There is a lightweight
+[toAsyncConnection]()
+method to get an `AsyncConnection` from `Connection` if needed.
+</Callout>
+
+Starting with HBase 0.96, endpoint coprocessors are implemented using Google Protocol
+Buffers (protobuf). For more details on protobuf, see Google's
+[Protocol Buffer Guide](https://developers.google.com/protocol-buffers/docs/proto).
+Endpoint Coprocessors written in version 0.94 are not compatible with version 0.96 or later.
+See
+[HBASE-5448](https://issues.apache.org/jira/browse/HBASE-5448). To upgrade your
+HBase cluster from 0.94 or earlier to 0.96 or later, you need to reimplement your
+coprocessor.
+
+In HBase 2.x, we made use of a shaded version of protobuf 3.x, but kept the
+protobuf for coprocessors on 2.5.0. In HBase 3.0.0, we removed all dependencies on
+non-shaded protobuf so you need to reimplement your coprocessor to make use of the
+shaded protobuf version provided in hbase-thirdparty. Please see
+the [protobuf](/docs/protobuf) section for more details.
+
+Coprocessor Endpoints should make no use of HBase internals and
+only avail of public APIs; ideally a CPEP should depend on Interfaces
+and data structures only. This is not always possible but beware
+that doing so makes the Endpoint brittle, liable to breakage as HBase
+internals evolve. HBase internal APIs annotated as private or evolving
+do not have to respect semantic versioning rules or general java rules on
+deprecation before removal. While generated protobuf files are
+absent the hbase audience annotations — they are created by the
+protobuf protoc tool which knows nothing of how HBase works —
+they should be considered `@InterfaceAudience.Private` and so are liable to
+change.
+
+[Examples](/docs/cp#cp-examples) provides working examples of endpoint coprocessors.
+
+## Loading Coprocessors
+
+To make your coprocessor available to HBase, it must be _loaded_, either statically
+(through the HBase configuration) or dynamically (using HBase Shell or the Java API).
+
+### Static Loading
+
+Follow these steps to statically load your coprocessor. Keep in mind that you must
+restart HBase to unload a coprocessor that has been loaded statically.
+
+1. Define the Coprocessor in _hbase-site.xml_, with a `<property>` element with a `<name>`
+ and a `<value>` sub-element. The `<name>` should be one of the following:
+
+ - `hbase.coprocessor.region.classes` for RegionObservers and Endpoints.
+ - `hbase.coprocessor.wal.classes` for WALObservers.
+ - `hbase.coprocessor.master.classes` for MasterObservers.
+
+ `<value>` must contain the fully-qualified class name of your coprocessor's implementation
+ class.
+
+ For example, to load a Coprocessor (implemented in class SumEndPoint.java) you have to create the
+ following entry in the RegionServer's 'hbase-site.xml' file (generally located under the 'conf' directory):
+
+ ```xml
+ <property>
+   <name>hbase.coprocessor.region.classes</name>
+   <value>org.myname.hbase.coprocessor.endpoint.SumEndPoint</value>
+ </property>
+ ```
+
+ If multiple classes are specified for loading, the class names must be comma-separated.
+ The framework attempts to load all the configured classes using the default class loader.
+ Therefore, the jar file must reside on the server-side HBase classpath.
+
+ Coprocessors which are loaded in this way will be active on all regions of all tables.
+ These are also called system Coprocessors.
+ The first listed Coprocessors will be assigned the priority `Coprocessor.Priority.SYSTEM`.
+ Each subsequent coprocessor in the list will have its priority value incremented by one (which
+ reduces its priority, because priorities have the natural sort order of Integers).
+
+ These priority values can be manually overridden in hbase-site.xml. This can be useful if you
+ want to guarantee that a coprocessor will execute after another. For example, in the following
+ configuration `SumEndPoint` would be guaranteed to go last, except in the case of a tie with
+ another coprocessor:
+
+ ```xml
+ <property>
+   <name>hbase.coprocessor.region.classes</name>
+   <value>org.myname.hbase.coprocessor.endpoint.SumEndPoint|2147483647</value>
+ </property>
+ ```
+
+ When calling out to registered observers, the framework executes their callback methods in the
+ sorted order of their priority.
+ Ties are broken arbitrarily.
+
+2. Put your code on HBase's classpath. One easy way to do this is to drop the jar
+ (containing your code and all the dependencies) into the `lib/` directory in the
+ HBase installation.
+
+3. Restart HBase.
+
+### Static Unloading
+
+1. Delete the coprocessor's `<property>` element, including sub-elements, from `hbase-site.xml`.
+2. Restart HBase.
+3. Optionally, remove the coprocessor's JAR file from the classpath or HBase's `lib/`
+ directory.
+
+### Dynamic Loading
+
+You can also load a coprocessor dynamically, without restarting HBase. This may seem
+preferable to static loading, but dynamically loaded coprocessors are loaded on a
+per-table basis, and are only available to the table for which they were loaded. For
+this reason, dynamically loaded coprocessors are sometimes called **Table Coprocessors**.
+
+In addition, dynamically loading a coprocessor acts as a schema change on the table,
+and the table must be taken offline to load the coprocessor.
+
+There are three ways to dynamically load Coprocessor.
+
+<Callout>
+The instructions below make the following assumptions:
+
+- A JAR called `coprocessor.jar` contains the Coprocessor implementation along with all of its
+  dependencies.
+- The JAR is available in HDFS in some location like
+  `hdfs://NAMENODE:PORT/user/HADOOP_USER/coprocessor.jar`.
+</Callout>
+
+#### Using HBase Shell
+
+1. Load the Coprocessor, using a command like the following:
+
+ ```ruby
+ hbase alter 'users', METHOD => 'table_att', 'Coprocessor'=>'hdfs://NAMENODE:PORT/user/HADOOP_USER/coprocessor.jar|org.myname.hbase.Coprocessor.RegionObserverExample|1073741823|arg1=1,arg2=2'
+ ```
+
+ The Coprocessor framework will try to read the class information from the coprocessor table
+ attribute value.
+ The value contains four pieces of information which are separated by the pipe (`|`) character.
+ - File path: The jar file containing the Coprocessor implementation must be in a location where
+ all region servers can read it.
+ You could copy the file onto the local disk on each region server, but it is recommended to store
+ it in HDFS.
+ [HBASE-14548](https://issues.apache.org/jira/browse/HBASE-14548) allows a directory containing the jars
+ or some wildcards to be specified, such as: `hdfs://NAMENODE:PORT/user/HADOOP_USER/` or
+ `hdfs://NAMENODE:PORT/user/HADOOP_USER/*.jar`. Please note that if a directory is specified,
+ all jar files (.jar) in the directory are added. It does not search for files in sub-directories.
+ Do not use a wildcard if you would like to specify a directory. This enhancement applies to the
+ usage via the JAVA API as well.
+ - Class name: The full class name of the Coprocessor.
+ - Priority: An integer. The framework will determine the execution sequence of all configured
+ observers registered at the same hook using priorities. This field can be left blank. In that
+ case the framework will assign a default priority value.
+ - Arguments (Optional): This field is passed to the Coprocessor implementation. This is optional.
+
+2. Verify that the coprocessor loaded:
+
+ ```ruby
+ hbase(main):04:0> describe 'users'
+ ```
+
+ The coprocessor should be listed in the `TABLE_ATTRIBUTES`.
+
+#### Using the Java API (all HBase versions)
+
+The following Java code shows how to use the `setValue()` method of `HTableDescriptor`
+to load a coprocessor on the `users` table.
+
+```java
+TableName tableName = TableName.valueOf("users");
+String path = "hdfs://<namenode>:<port>/user/<hadoop-user>/coprocessor.jar";
+Configuration conf = HBaseConfiguration.create();
+Connection connection = ConnectionFactory.createConnection(conf);
+Admin admin = connection.getAdmin();
+HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
+HColumnDescriptor columnFamily1 = new HColumnDescriptor("personalDet");
+columnFamily1.setMaxVersions(3);
+hTableDescriptor.addFamily(columnFamily1);
+HColumnDescriptor columnFamily2 = new HColumnDescriptor("salaryDet");
+columnFamily2.setMaxVersions(3);
+hTableDescriptor.addFamily(columnFamily2);
+hTableDescriptor.setValue("COPROCESSOR$1", path + "|"
++ RegionObserverExample.class.getCanonicalName() + "|"
++ Coprocessor.PRIORITY_USER);
+admin.modifyTable(tableName, hTableDescriptor);
+```
+
+#### Using the Java API (HBase 0.96+ only)
+
+In HBase 0.96 and newer, the `addCoprocessor()` method of `HTableDescriptor` provides
+an easier way to load a coprocessor dynamically.
+
+```java
+TableName tableName = TableName.valueOf("users");
+Path path = new Path("hdfs://<namenode>:<port>/user/<hadoop-user>/coprocessor.jar");
+Configuration conf = HBaseConfiguration.create();
+Connection connection = ConnectionFactory.createConnection(conf);
+Admin admin = connection.getAdmin();
+HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
+HColumnDescriptor columnFamily1 = new HColumnDescriptor("personalDet");
+columnFamily1.setMaxVersions(3);
+hTableDescriptor.addFamily(columnFamily1);
+HColumnDescriptor columnFamily2 = new HColumnDescriptor("salaryDet");
+columnFamily2.setMaxVersions(3);
+hTableDescriptor.addFamily(columnFamily2);
+hTableDescriptor.addCoprocessor(RegionObserverExample.class.getCanonicalName(), path,
+Coprocessor.PRIORITY_USER, null);
+admin.modifyTable(tableName, hTableDescriptor);
+```
+
+<Callout>
+  There is no guarantee that the framework will load a given Coprocessor successfully. For example,
+  the shell command neither guarantees a jar file exists at a particular location nor verifies
+  whether the given class is actually contained in the jar file.
+</Callout>
+
+### Dynamic Unloading
+
+#### Using HBase Shell
+
+1. Alter the table to remove the coprocessor with `table_att_unset`.
+
+ ```ruby
+ hbase> alter 'users', METHOD => 'table_att_unset', NAME => 'coprocessor$1'
+ ```
+
+2. Alternatively, alter the table to remove the coprocessor with `table_remove_coprocessor`, introduced in
+ [HBASE-26524](https://issues.apache.org/jira/browse/HBASE-26524), by specifying an explicit
+ class name:
+
+ ```ruby
+ hbase> alter 'users', METHOD => 'table_remove_coprocessor', CLASSNAME => \
+ 'org.myname.hbase.Coprocessor.RegionObserverExample'
+ ```
+
+#### Using the Java API
+
+Reload the table definition without setting the coprocessor value via either the
+`setValue()` or the `addCoprocessor()` method. This will remove any coprocessor
+attached to the table.
+
+```java
+TableName tableName = TableName.valueOf("users");
+String path = "hdfs://NAMENODE:PORT/user/HADOOP_USER/coprocessor.jar";
+Configuration conf = HBaseConfiguration.create();
+Connection connection = ConnectionFactory.createConnection(conf);
+Admin admin = connection.getAdmin();
+HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
+HColumnDescriptor columnFamily1 = new HColumnDescriptor("personalDet");
+columnFamily1.setMaxVersions(3);
+hTableDescriptor.addFamily(columnFamily1);
+HColumnDescriptor columnFamily2 = new HColumnDescriptor("salaryDet");
+columnFamily2.setMaxVersions(3);
+hTableDescriptor.addFamily(columnFamily2);
+admin.modifyTable(tableName, hTableDescriptor);
+```
+
+In HBase 0.96 and newer, you can instead use the `removeCoprocessor()` method of the
+`HTableDescriptor` class.
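+
+As a minimal sketch (assuming the coprocessor was attached under the class name used
+above, and fetching the current descriptor with `Admin#getTableDescriptor`):
+
+```java
+// Fetch the current descriptor, drop the coprocessor entry, and push the
+// modified descriptor back to the cluster.
+TableName tableName = TableName.valueOf("users");
+Configuration conf = HBaseConfiguration.create();
+try (Connection connection = ConnectionFactory.createConnection(conf);
+     Admin admin = connection.getAdmin()) {
+  HTableDescriptor hTableDescriptor =
+      new HTableDescriptor(admin.getTableDescriptor(tableName));
+  hTableDescriptor.removeCoprocessor(RegionObserverExample.class.getCanonicalName());
+  admin.modifyTable(tableName, hTableDescriptor);
+}
+```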
+
+## Examples [#cp-examples]
+
+HBase ships with examples of Observer Coprocessors.
+
+A more detailed example is given below.
+
+These examples assume a table called `users`, which has two column families `personalDet`
+and `salaryDet`, containing personal and salary details. Below is the graphical representation
+of the `users` table.
+
+**Users Table**
+
+| | **personalDet** | | | **salaryDet** | | |
+| ---------- | --------------- | ------------ | ---------- | ------------- | ------- | -------------- |
+| **rowkey** | **name** | **lastname** | **dob** | **gross** | **net** | **allowances** |
+| admin | Admin | Admin | | | | |
+| cdickens | Charles | Dickens | 02/07/1812 | 10000 | 8000 | 2000 |
+| jverne | Jules | Verne | 02/08/1828 | 12000 | 9000 | 3000 |
+
+### Observer Example
+
+The following Observer coprocessor prevents the details of the user `admin` from being
+returned in a `Get` or `Scan` of the `users` table.
+
+1. Write a class that implements the
+ [RegionCoprocessor](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionCoprocessor.html) and
+ [RegionObserver](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/coprocessor/RegionObserver.html)
+ interfaces.
+2. Override the `preGetOp()` method (the `preGet()` method is deprecated) to check
+ whether the client has queried for the rowkey with value `admin`. If so, return an
+ empty result. Otherwise, process the request as normal.
+3. Put your code and dependencies in a JAR file.
+4. Place the JAR in HDFS where HBase can locate it.
+5. Load the Coprocessor.
+6. Write a simple program to test it.
+
+The following code implements the above steps:
+
+```java
+public class RegionObserverExample implements RegionCoprocessor, RegionObserver {
+
+ private static final byte[] ADMIN = Bytes.toBytes("admin");
+ private static final byte[] COLUMN_FAMILY = Bytes.toBytes("details");
+ private static final byte[] COLUMN = Bytes.toBytes("Admin_det");
+ private static final byte[] VALUE = Bytes.toBytes("You can't see Admin details");
+
+ @Override
+ public Optional<RegionObserver> getRegionObserver() {
+ return Optional.of(this);
+ }
+
+ @Override
+ public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e, final Get get, final List<Cell> results)
+ throws IOException {
+
+ if (Bytes.equals(get.getRow(),ADMIN)) {
+ Cell c = CellUtil.createCell(get.getRow(),COLUMN_FAMILY, COLUMN,
+ System.currentTimeMillis(), (byte)4, VALUE);
+ results.add(c);
+ e.bypass();
+ }
+ }
+}
+```
+
+Overriding the `preGetOp()` method only covers `Get` operations. You also need to override
+the `preScannerOpen()` method to filter the `admin` row from `Scan` results.
+
+```java
+@Override
+public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e, final Scan scan,
+final RegionScanner s) throws IOException {
+
+ Filter filter = new RowFilter(CompareOp.NOT_EQUAL, new BinaryComparator(ADMIN));
+ scan.setFilter(filter);
+ return s;
+}
+```
+
+This method works but there is a _side effect_. If the client has used a filter in
+its scan, that filter will be replaced by this filter. Instead, you can explicitly
+remove any `admin` results from the scan:
+
+```java
+@Override
+public boolean postScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e, final InternalScanner s,
+final List<Result> results, final int limit, final boolean hasMore) throws IOException {
+ Result result = null;
+ Iterator<Result> iterator = results.iterator();
+ while (iterator.hasNext()) {
+ result = iterator.next();
+ if (Bytes.equals(result.getRow(), ADMIN)) {
+ iterator.remove();
+ break;
+ }
+ }
+ return hasMore;
+}
+```
+
+### Endpoint Example
+
+Still using the `users` table, this example implements an Endpoint coprocessor to
+calculate the sum of all employee salaries.
+
+1. Create a `.proto` file defining your service.
+
+ ```protobuf
+ option java_package = "org.myname.hbase.coprocessor.autogenerated";
+ option java_outer_classname = "Sum";
+ option java_generic_services = true;
+ option java_generate_equals_and_hash = true;
+ option optimize_for = SPEED;
+ message SumRequest {
+ required string family = 1;
+ required string column = 2;
+ }
+
+ message SumResponse {
+ required int64 sum = 1 [default = 0];
+ }
+
+ service SumService {
+ rpc getSum(SumRequest)
+ returns (SumResponse);
+ }
+ ```
+
+2. Execute the `protoc` command to generate the Java code from the above `.proto` file.
+
+ ```bash
+ $ mkdir src
+ $ protoc --java_out=src ./sum.proto
+ ```
+
+ This will generate a class called `Sum.java`.
+
+3. Write a class that extends the generated service class, implements the `Coprocessor`
+ and `CoprocessorService` interfaces, and overrides the service method.
+
+
+ If you load a coprocessor from `hbase-site.xml` and then load the same coprocessor
+ again using HBase Shell, it will be loaded a second time. The same class will
+ exist twice, and the second instance will have a higher ID (and thus a lower priority).
+ The effect is that the duplicate coprocessor is effectively ignored.
+
+
+ ```java
+ public class SumEndPoint extends Sum.SumService implements Coprocessor, CoprocessorService {
+
+ private RegionCoprocessorEnvironment env;
+
+ @Override
+ public Service getService() {
+ return this;
+ }
+
+ @Override
+ public void start(CoprocessorEnvironment env) throws IOException {
+ if (env instanceof RegionCoprocessorEnvironment) {
+ this.env = (RegionCoprocessorEnvironment)env;
+ } else {
+ throw new CoprocessorException("Must be loaded on a table region!");
+ }
+ }
+
+ @Override
+ public void stop(CoprocessorEnvironment env) throws IOException {
+ // do nothing
+ }
+
+ @Override
+ public void getSum(RpcController controller, Sum.SumRequest request, RpcCallback<Sum.SumResponse> done) {
+ Scan scan = new Scan();
+ scan.addFamily(Bytes.toBytes(request.getFamily()));
+ scan.addColumn(Bytes.toBytes(request.getFamily()), Bytes.toBytes(request.getColumn()));
+
+ Sum.SumResponse response = null;
+ InternalScanner scanner = null;
+
+ try {
+ scanner = env.getRegion().getScanner(scan);
+ List<Cell> results = new ArrayList<>();
+ boolean hasMore = false;
+ long sum = 0L;
+
+ do {
+ hasMore = scanner.next(results);
+ for (Cell cell : results) {
+ sum = sum + Bytes.toLong(CellUtil.cloneValue(cell));
+ }
+ results.clear();
+ } while (hasMore);
+
+ response = Sum.SumResponse.newBuilder().setSum(sum).build();
+ } catch (IOException ioe) {
+ ResponseConverter.setControllerException(controller, ioe);
+ } finally {
+ if (scanner != null) {
+ try {
+ scanner.close();
+ } catch (IOException ignored) {}
+ }
+ }
+
+ done.run(response);
+ }
+ }
+ ```
+
+ ```java
+ Configuration conf = HBaseConfiguration.create();
+ Connection connection = ConnectionFactory.createConnection(conf);
+ TableName tableName = TableName.valueOf("users");
+ Table table = connection.getTable(tableName);
+
+ final Sum.SumRequest request = Sum.SumRequest.newBuilder().setFamily("salaryDet").setColumn("gross").build();
+ try {
+ Map<byte[], Long> results = table.coprocessorService(
+ Sum.SumService.class,
+ null, /* start key */
+ null, /* end key */
+ new Batch.Call<Sum.SumService, Long>() {
+ @Override
+ public Long call(Sum.SumService aggregate) throws IOException {
+ BlockingRpcCallback<Sum.SumResponse> rpcCallback = new BlockingRpcCallback<>();
+ aggregate.getSum(null, request, rpcCallback);
+ Sum.SumResponse response = rpcCallback.get();
+
+ return response.hasSum() ? response.getSum() : 0L;
+ }
+ }
+ );
+
+ for (Long sum : results.values()) {
+ System.out.println("Sum = " + sum);
+ }
+ } catch (ServiceException e) {
+ e.printStackTrace();
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ ```
+
+4. Load the Coprocessor.
+
+5. Write client code to call the Coprocessor.
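+
+As a sketch of steps 4 and 5: the Endpoint can be loaded with the same HBase Shell
+`alter` syntax shown earlier (the JAR name, HDFS path, and package below are assumptions
+for illustration; `1073741823` is `Coprocessor.PRIORITY_USER`), and the client listing
+in step 3 serves as the calling code.
+
+```ruby
+hbase> alter 'users', METHOD => 'table_att', 'Coprocessor' => \
+  'hdfs://NAMENODE:PORT/user/HADOOP_USER/sum-endpoint.jar|org.myname.hbase.coprocessor.SumEndPoint|1073741823'
+```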
+
+## Guidelines For Deploying A Coprocessor
+
+**Bundling Coprocessors**
+You can bundle all classes for a coprocessor into a
+single JAR on the RegionServer's classpath, for easy deployment. Otherwise,
+place all dependencies on the RegionServer's classpath so that they can be
+loaded during RegionServer start-up. The classpath for a RegionServer is set
+in the RegionServer's `hbase-env.sh` file.
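+
+For example, the classpath could be extended in `hbase-env.sh` like this (a sketch only;
+the JAR path is an assumption):
+
+```bash
+# Add the coprocessor JAR (and any dependency JARs) to the RegionServer classpath.
+export HBASE_CLASSPATH="$HBASE_CLASSPATH:/opt/hbase/coprocessors/my-coprocessor.jar"
+```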
+
+**Automating Deployment**
+You can use a tool such as Puppet, Chef, or
+Ansible to ship the JAR for the coprocessor to the required location on your
+RegionServers' filesystems and restart each RegionServer, to automate
+coprocessor deployment. Details for such set-ups are out of scope of this
+document.
+
+**Updating a Coprocessor**
+Deploying a new version of a given coprocessor is not as simple as disabling it,
+replacing the JAR, and re-enabling the coprocessor. This is because you cannot
+reload a class in a JVM unless you delete all the current references to it.
+Since the current JVM has reference to the existing coprocessor, you must restart
+the JVM, by restarting the RegionServer, in order to replace it. This behavior
+is not expected to change.
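+
+A sketch of such an update (the paths and the use of a rolling restart are assumptions;
+adapt them to your deployment process):
+
+```bash
+# Replace the JAR in HDFS, then restart the RegionServers so their JVMs can
+# load the new class. rolling-restart.sh ships in the HBase bin/ directory.
+hdfs dfs -put -f coprocessor.jar /user/HADOOP_USER/coprocessor.jar
+./bin/rolling-restart.sh --rs-only
+```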
+
+**Coprocessor Logging**
+The Coprocessor framework does not provide an API for logging beyond standard Java
+logging.
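+
+In practice this simply means using whatever standard logging facade is on the classpath;
+a minimal sketch with SLF4J (an assumption, not a coprocessor-specific API):
+
+```java
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LoggingObserverExample implements RegionCoprocessor, RegionObserver {
+  // Plain SLF4J logger; the coprocessor framework adds nothing on top of this.
+  private static final Logger LOG = LoggerFactory.getLogger(LoggingObserverExample.class);
+
+  @Override
+  public Optional<RegionObserver> getRegionObserver() {
+    LOG.info("LoggingObserverExample loaded");
+    return Optional.of(this);
+  }
+}
+```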
+
+**Coprocessor Configuration**
+If you do not want to load coprocessors from the HBase Shell, you can add their configuration
+properties to `hbase-site.xml`. In [Using HBase Shell](/docs/cp#using-hbase-shell), two arguments are
+set: `arg1=1,arg2=2`. These could have been added to `hbase-site.xml` as follows:
+
+```xml
+<property>
+  <name>arg1</name>
+  <value>1</value>
+</property>
+<property>
+  <name>arg2</name>
+  <value>2</value>
+</property>
+```
+
+You can then verify the coprocessor's behavior with client code like the following:
+
+```java
+Configuration conf = HBaseConfiguration.create();
+Connection connection = ConnectionFactory.createConnection(conf);
+TableName tableName = TableName.valueOf("users");
+Table table = connection.getTable(tableName);
+
+Get get = new Get(Bytes.toBytes("admin"));
+Result result = table.get(get);
+for (Cell c : result.rawCells()) {
+ System.out.println(Bytes.toString(CellUtil.cloneRow(c))
+ + "==> " + Bytes.toString(CellUtil.cloneFamily(c))
+ + "{" + Bytes.toString(CellUtil.cloneQualifier(c))
+ + ":" + Bytes.toLong(CellUtil.cloneValue(c)) + "}");
+}
+Scan scan = new Scan();
+ResultScanner scanner = table.getScanner(scan);
+for (Result res : scanner) {
+ for (Cell c : res.rawCells()) {
+ System.out.println(Bytes.toString(CellUtil.cloneRow(c))
+ + " ==> " + Bytes.toString(CellUtil.cloneFamily(c))
+ + " {" + Bytes.toString(CellUtil.cloneQualifier(c))
+ + ":" + Bytes.toLong(CellUtil.cloneValue(c))
+ + "}");
+ }
+}
+```
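+
+Inside the coprocessor itself, those arguments can be read from the environment's
+configuration. A minimal sketch (assuming the `arg1`/`arg2` keys shown above):
+
+```java
+@Override
+public void start(CoprocessorEnvironment env) throws IOException {
+  // Values come either from hbase-site.xml or from the shell/API argument string.
+  Configuration conf = env.getConfiguration();
+  String arg1 = conf.get("arg1"); // "1" in the example above
+  String arg2 = conf.get("arg2"); // "2" in the example above
+}
+```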
+
+## Restricting Coprocessor Usage
+
+In multitenant environments, allowing arbitrary user coprocessors can be a significant concern. HBase provides a continuum of options for ensuring that only expected coprocessors are running:
+
+- `hbase.coprocessor.enabled`: Enables or disables all coprocessors. This will limit the functionality of HBase, as disabling all coprocessors will disable some security providers. An example coprocessor so affected is `org.apache.hadoop.hbase.security.access.AccessController`.
+- `hbase.coprocessor.user.enabled`: Enables or disables loading coprocessors on tables (i.e. user coprocessors).
+- One can statically load coprocessors, and optionally tune their priorities, via the following tunables in `hbase-site.xml`:
+  - `hbase.coprocessor.regionserver.classes`: A comma-separated list of coprocessors that are loaded by region servers
+  - `hbase.coprocessor.region.classes`: A comma-separated list of RegionObserver and Endpoint coprocessors
+  - `hbase.coprocessor.user.region.classes`: A comma-separated list of coprocessors that are loaded by all regions
+  - `hbase.coprocessor.master.classes`: A comma-separated list of coprocessors that are loaded by the master (MasterObserver coprocessors)
+  - `hbase.coprocessor.wal.classes`: A comma-separated list of WALObserver coprocessors to load
+- `hbase.coprocessor.abortonerror`: Whether to abort the daemon that has loaded the coprocessor if the coprocessor throws an error other than an `IOError`. If this is set to `false` and an access controller coprocessor has a fatal error, the coprocessor will be circumvented; in secure installations this is therefore advised to be `true`. However, one may override this on a per-table basis for user coprocessors, to ensure that they do not abort their running region server and are instead unloaded on error.
+- `hbase.coprocessor.region.whitelist.paths`: A comma-separated list available to those loading `org.apache.hadoop.hbase.security.access.CoprocessorWhitelistMasterObserver`, whereby one can use the following options to white-list paths from which coprocessors may be loaded (see the sketch after this list):
+  - Coprocessors on the classpath are implicitly white-listed
+  - `*` to wildcard all coprocessor paths
+  - An entire filesystem (e.g. `hdfs://my-cluster/`)
+  - A wildcard path to be evaluated by [FilenameUtils.wildcardMatch](https://commons.apache.org/proper/commons-io/javadocs/api-release/org/apache/commons/io/FilenameUtils.html)
+  - Note: a path can specify a scheme or not (e.g. `file:///usr/hbase/lib/coprocessors` or, for all filesystems, `/usr/hbase/lib/coprocessors`)
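+
+As a sketch, a whitelist configuration in `hbase-site.xml` might look like the following
+(the observer class is real; the path value is an illustrative assumption):
+
+```xml
+<property>
+  <name>hbase.coprocessor.master.classes</name>
+  <value>org.apache.hadoop.hbase.security.access.CoprocessorWhitelistMasterObserver</value>
+</property>
+<property>
+  <name>hbase.coprocessor.region.whitelist.paths</name>
+  <value>hdfs://my-cluster/hbase/coprocessors/*.jar</value>
+</property>
+```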
diff --git a/src/main/asciidoc/_chapters/datamodel.adoc b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/datamodel.mdx
similarity index 56%
rename from src/main/asciidoc/_chapters/datamodel.adoc
rename to hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/datamodel.mdx
index e4e488f1835a..0f2cd1b2349a 100644
--- a/src/main/asciidoc/_chapters/datamodel.adoc
+++ b/hbase-website/app/pages/_docs/docs/_mdx/(multi-page)/datamodel.mdx
@@ -1,112 +1,86 @@
-////
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-////
-
-[[datamodel]]
-= Data Model
-:doctype: book
-:numbered:
-:toc: left
-:icons: font
-:experimental:
-
-In HBase, data is stored in tables, which have rows and columns.
-This is a terminology overlap with relational databases (RDBMSs), but this is not a helpful analogy.
-Instead, it can be helpful to think of an HBase table as a multi-dimensional map.
-
-.HBase Data Model Terminology
-
-Table::
- An HBase table consists of multiple rows.
-
-Row::
- A row in HBase consists of a row key and one or more columns with values associated with them.
- Rows are sorted alphabetically by the row key as they are stored.
- For this reason, the design of the row key is very important.
- The goal is to store data in such a way that related rows are near each other.
- A common row key pattern is a website domain.
- If your row keys are domains, you should probably store them in reverse (org.apache.www, org.apache.mail, org.apache.jira). This way, all of the Apache domains are near each other in the table, rather than being spread out based on the first letter of the subdomain.
-
-Column::
- A column in HBase consists of a column family and a column qualifier, which are delimited by a `:` (colon) character.
-
-Column Family::
- Column families physically colocate a set of columns and their values, often for performance reasons.
- Each column family has a set of storage properties, such as whether its values should be cached in memory, how its data is compressed or its row keys are encoded, and others.
- Each row in a table has the same column families, though a given row might not store anything in a given column family.
-
-Column Qualifier::
- A column qualifier is added to a column family to provide the index for a given piece of data.
- Given a column family `content`, a column qualifier might be `content:html`, and another might be `content:pdf`.
- Though column families are fixed at table creation, column qualifiers are mutable and may differ greatly between rows.
-
-Cell::
- A cell is a combination of row, column family, and column qualifier, and contains a value and a timestamp, which represents the value's version.
-
-Timestamp::
- A timestamp is written alongside each value, and is the identifier for a given version of a value.
- By default, the timestamp represents the time on the RegionServer when the data was written, but you can specify a different timestamp value when you put data into the cell.
-
-[[conceptual.view]]
-== Conceptual View
-
-You can read a very understandable explanation of the HBase data model in the blog post link:https://dzone.com/articles/understanding-hbase-and-bigtab[Understanding HBase and BigTable] by Jim R. Wilson.
-Another good explanation is available in the PDF link:http://0b4af6cdc2f0c5998459-c0245c5c937c5dedcca3f1764ecc9b2f.r43.cf2.rackcdn.com/9353-login1210_khurana.pdf[Introduction to Basic Schema Design] by Amandeep Khurana.
+---
+title: "Data Model"
+description: "In HBase, data is stored in tables, which have rows and columns. This is a terminology overlap with relational databases (RDBMSs), but this is not a helpful analogy. Instead, it can be helpful to think of an HBase table as a multi-dimensional map."
+---
+
+## HBase Data Model Terminology
+
+#### Table [!toc]
+
+An HBase table consists of multiple rows.
+
+#### Row [!toc]
+
+A row in HBase consists of a row key and one or more columns with values associated with them.
+Rows are sorted alphabetically by the row key as they are stored.
+For this reason, the design of the row key is very important.
+The goal is to store data in such a way that related rows are near each other.
+A common row key pattern is a website domain.
+If your row keys are domains, you should probably store them in reverse (org.apache.www, org.apache.mail, org.apache.jira). This way, all of the Apache domains are near each other in the table, rather than being spread out based on the first letter of the subdomain.
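+
+A minimal sketch of this reversal (plain Java string handling; not an HBase API):
+
+```java
+// Turn "www.apache.org" into "org.apache.www" so related rows sort together.
+String domain = "www.apache.org";
+List<String> parts = new ArrayList<>(Arrays.asList(domain.split("\\.")));
+Collections.reverse(parts);
+String rowKey = String.join(".", parts); // "org.apache.www"
+```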
+
+#### Column [!toc]
+
+A column in HBase consists of a column family and a column qualifier, which are delimited by a `:` (colon) character.
+
+#### Column Family [!toc]
+
+Column families physically colocate a set of columns and their values, often for performance reasons.
+Each column family has a set of storage properties, such as whether its values should be cached in memory, how its data is compressed or its row keys are encoded, and others.
+Each row in a table has the same column families, though a given row might not store anything in a given column family.
+
+#### Column Qualifier [!toc]
+
+A column qualifier is added to a column family to provide the index for a given piece of data.
+Given a column family `content`, a column qualifier might be `content:html`, and another might be `content:pdf`.
+Though column families are fixed at table creation, column qualifiers are mutable and may differ greatly between rows.
+
+#### Cell [!toc]
+
+A cell is a combination of row, column family, and column qualifier, and contains a value and a timestamp, which represents the value's version.
+
+#### Timestamp [!toc]
+
+A timestamp is written alongside each value, and is the identifier for a given version of a value.
+By default, the timestamp represents the time on the RegionServer when the data was written, but you can specify a different timestamp value when you put data into the cell.
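+
+As a minimal sketch (assuming `table` is an open `Table` handle; the family and qualifier
+names are only examples), an explicit timestamp can be supplied on a `Put`:
+
+```java
+long explicitTimestamp = 1600000000000L; // an assumed epoch-millis value
+Put put = new Put(Bytes.toBytes("com.cnn.www"));
+put.addColumn(Bytes.toBytes("contents"), Bytes.toBytes("html"),
+    explicitTimestamp, Bytes.toBytes("..."));
+table.put(put);
+```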
+
+## Conceptual View
+
+You can read a very understandable explanation of the HBase data model in the blog post [Understanding HBase and BigTable](https://dzone.com/articles/understanding-hbase-and-bigtab) by Jim R. Wilson.
+Another good explanation is available in the PDF [Introduction to Basic Schema Design](http://0b4af6cdc2f0c5998459-c0245c5c937c5dedcca3f1764ecc9b2f.r43.cf2.rackcdn.com/9353-login1210_khurana.pdf) by Amandeep Khurana.
It may help to read different perspectives to get a solid understanding of HBase schema design.
The linked articles cover the same ground as the information in this section.
-The following example is a slightly modified form of the one on page 2 of the link:http://research.google.com/archive/bigtable.html[BigTable] paper.
+The following example is a slightly modified form of the one on page 2 of the [BigTable](http://research.google.com/archive/bigtable.html) paper.
There is a table called `webtable` that contains two rows (`com.cnn.www` and `com.example.www`) and three column families named `contents`, `anchor`, and `people`.
-In this example, for the first row (`com.cnn.www`), `anchor` contains two columns (`anchor:cssnsi.com`, `anchor:my.look.ca`) and `contents` contains one column (`contents:html`). This example contains 5 versions of the row with the row key `com.cnn.www`, and one version of the row with the row key `com.example.www`.
+In this example, for the first row (`com.cnn.www`), `anchor` contains two columns (`anchor:cssnsi.com`, `anchor:my.look.ca`) and `contents` contains one column (`contents:html`). This example contains 5 versions of the row with the row key `com.cnn.www`, and one version of the row with the row key `com.example.www`.
The `contents:html` column qualifier contains the entire HTML of a given website.
Qualifiers of the `anchor` column family each contain the external site which links to the site represented by the row, along with the text it used in the anchor of its link.
The `people` column family represents people associated with the site.
-.Column Names
-[NOTE]
-====
-By convention, a column name is made of its column family prefix and a _qualifier_.
-For example, the column _contents:html_ is made up of the column family `contents` and the `html` qualifier.
-The colon character (`:`) delimits the column family from the column family _qualifier_.
-====
-
-.Table `webtable`
-[cols="1,1,1,1,1", frame="all", options="header"]
-|===
-|Row Key |Time Stamp |ColumnFamily `contents` |ColumnFamily `anchor`|ColumnFamily `people`
-|"com.cnn.www" |t9 | |anchor:cnnsi.com = "CNN" |
-|"com.cnn.www" |t8 | |anchor:my.look.ca = "CNN.com" |
-|"com.cnn.www" |t6 | contents:html = "..." | |
-|"com.cnn.www" |t5 | contents:html = "..." | |
-|"com.cnn.www" |t3 | contents:html = "..." | |
-|"com.example.www"| t5 | contents:html = "..." | | people:author = "John Doe"
-|===
+
+ By convention, a column name is made of its column family prefix and a _qualifier_. For example,
+ the column _contents:html_ is made up of the column family `contents` and the `html` qualifier.
+ The colon character (`:`) delimits the column family from the column family _qualifier_.
+
+
+#### Table `webtable` [!toc]
+
+| Row Key | Time Stamp | ColumnFamily `contents` | ColumnFamily `anchor` | ColumnFamily `people` |
+| ----------------- | ---------- | ----------------------------- | ----------------------------- | -------------------------- |
+| "com.cnn.www" | t9 | | anchor:cnnsi.com = "CNN" | |
+| "com.cnn.www" | t8 | | anchor:my.look.ca = "CNN.com" | |
+| "com.cnn.www" | t6 | contents:html = "\..." | | |
+| "com.cnn.www" | t5 | contents:html = "\..." | | |
+| "com.cnn.www" | t3 | contents:html = "\..." | | |
+| "com.example.www" | t5 | contents:html = "\..." | | people:author = "John Doe" |
Cells in this table that appear to be empty do not take space, or in fact exist, in HBase.
This is what makes HBase "sparse." A tabular view is not the only possible way to look at data in HBase, or even the most accurate.
The following represents the same information as a multi-dimensional map.
This is only a mock-up for illustrative purposes and may not be strictly accurate.
-[source,json]
-----
+```json
{
"com.cnn.www": {
contents: {
@@ -130,116 +104,96 @@ This is only a mock-up for illustrative purposes and may not be strictly accurat
}
}
}
-----
+```
-[[physical.view]]
-== Physical View
+## Physical View
Although at a conceptual level tables may be viewed as a sparse set of rows, they are physically stored by column family.
A new column qualifier (column_family:column_qualifier) can be added to an existing column family at any time.
-.ColumnFamily `anchor`
-[cols="1,1,1", frame="all", options="header"]
-|===
-|Row Key | Time Stamp |Column Family `anchor`
-|"com.cnn.www" |t9 |`anchor:cnnsi.com = "CNN"`
-|"com.cnn.www" |t8 |`anchor:my.look.ca = "CNN.com"`
-|===
+#### ColumnFamily `anchor` [!toc]
+| Row Key | Time Stamp | Column Family `anchor` |
+| ------------- | ---------- | ------------------------------- |
+| "com.cnn.www" | t9 | `anchor:cnnsi.com = "CNN"` |
+| "com.cnn.www" | t8 | `anchor:my.look.ca = "CNN.com"` |
-.ColumnFamily `contents`
-[cols="1,1,1", frame="all", options="header"]
-|===
-|Row Key |Time Stamp |ColumnFamily `contents:`
-|"com.cnn.www" |t6 |contents:html = "..."
-|"com.cnn.www" |t5 |contents:html = "..."
-|"com.cnn.www" |t3 |contents:html = "..."
-|===
+#### ColumnFamily `contents` [!toc]
+| Row Key | Time Stamp | ColumnFamily `contents:` |
+| ------------- | ---------- | ----------------------------- |
+| "com.cnn.www" | t6 | contents:html = "\..." |
+| "com.cnn.www" | t5 | contents:html = "\..." |
+| "com.cnn.www" | t3 | contents:html = "\..." |
The empty cells shown in the conceptual view are not stored at all.
Thus a request for the value of the `contents:html` column at time stamp `t8` would return no value.
Similarly, a request for an `anchor:my.look.ca` value at time stamp `t9` would return no value.
However, if no timestamp is supplied, the most recent value for a particular column would be returned.
-Given multiple versions, the most recent is also the first one found, since timestamps are stored in descending order.
+Given multiple versions, the most recent is also the first one found, since timestamps are stored in descending order.
Thus a request for the values of all columns in the row `com.cnn.www` if no timestamp is specified would be: the value of `contents:html` from timestamp `t6`, the value of `anchor:cnnsi.com` from timestamp `t9`, the value of `anchor:my.look.ca` from timestamp `t8`.
-For more information about the internals of how Apache HBase stores data, see <>.
+For more information about the internals of how Apache HBase stores data, see [Regions](/docs/architecture/regions).
-== Namespace
+## Namespace
A namespace is a logical grouping of tables analogous to a database in relational database systems.
This abstraction lays the groundwork for upcoming multi-tenancy related features:
-* Quota Management (link:https://issues.apache.org/jira/browse/HBASE-8410[HBASE-8410]) - Restrict the amount of resources (i.e. regions, tables) a namespace can consume.
-* Namespace Security Administration (link:https://issues.apache.org/jira/browse/HBASE-9206[HBASE-9206]) - Provide another level of security administration for tenants.
-* Region server groups (link:https://issues.apache.org/jira/browse/HBASE-6721[HBASE-6721]) - A namespace/table can be pinned onto a subset of RegionServers thus guaranteeing a coarse level of isolation.
+- Quota Management ([HBASE-8410](https://issues.apache.org/jira/browse/HBASE-8410)) - Restrict the amount of resources (i.e. regions, tables) a namespace can consume.
+- Namespace Security Administration ([HBASE-9206](https://issues.apache.org/jira/browse/HBASE-9206)) - Provide another level of security administration for tenants.
+- Region server groups ([HBASE-6721](https://issues.apache.org/jira/browse/HBASE-6721)) - A namespace/table can be pinned onto a subset of RegionServers thus guaranteeing a coarse level of isolation.
-[[namespace_creation]]
-=== Namespace management
+### Namespace management
A namespace can be created, removed or altered.
Namespace membership is determined during table creation by specifying a fully-qualified table name of the form:
-[source,xml]
-----
+```xml
<table namespace>:<table qualifier>
-----
+```
-.Examples
-====
-[source,bourne]
-----
+#### Examples [#datamodel-namespace-management-examples]
+```bash
#Create a namespace
create_namespace 'my_ns'
-----
-
-[source,bourne]
-----
+```
+```bash
#create my_table in my_ns namespace
create 'my_ns:my_table', 'fam'
-----
-
-[source,bourne]
-----
+```
+```bash
#drop namespace
drop_namespace 'my_ns'
-----
-
-[source,bourne]
-----
+```
+```bash
#alter namespace
alter_namespace 'my_ns', {METHOD => 'set', 'PROPERTY_NAME' => 'PROPERTY_VALUE'}
-----
-====
+```
-[[namespace_special]]
-=== Predefined namespaces
+### Predefined namespaces
There are two predefined special namespaces:
-* hbase - system namespace, used to contain HBase internal tables
-* default - tables with no explicit specified namespace will automatically fall into this namespace
+- hbase - system namespace, used to contain HBase internal tables
+- default - tables with no explicit specified namespace will automatically fall into this namespace
-.Examples
-====
-[source,bourne]
-----
+#### Examples [#datamodel-predefined-namespaces-examples]
+```bash
#namespace=foo and table qualifier=bar
create 'foo:bar', 'fam'
#namespace=default and table qualifier=bar
create 'bar', 'fam'
-----
-====
+```
+
+### About hbase:namespace table
-[[namespace_table]]
-=== About hbase:namespace table
We used to have a system table called `hbase:namespace` for storing the namespace information.
It introduced some painful bugs in the past, especially that it may hang the master startup thus
@@ -253,20 +207,19 @@ fold its content into the `ns` family in `hbase:meta` table. When upgrading from
migration will be done automatically and the `hbase:namespace` table will be disabled after the
migration is done. You are free to leave it there for sometime and finally drop it.
-For more tails, please see https://issues.apache.org/jira/browse/HBASE-21154[HBASE-21154].
+For more details, please see [HBASE-21154](https://issues.apache.org/jira/browse/HBASE-21154).
-== Table
+## Table
Tables are declared up front at schema definition time.
-== Row
+## Row
Row keys are uninterpreted bytes.
Rows are lexicographically sorted with the lowest order appearing first in a table.
The empty byte array is used to denote both the start and end of a tables' namespace.
-[[columnfamily]]
-== Column Family
+## Column Family
Columns in Apache HBase are grouped into _column families_.
All column members of a column family have the same prefix.
@@ -279,36 +232,33 @@ Column families must be declared up front at schema definition time whereas colu
Physically, all column family members are stored together on the filesystem.
Because tunings and storage specifications are done at the column family level, it is advised that all column family members have the same general access pattern and size characteristics.
-== Cells
+## Cells
-A _{row, column, version}_ tuple exactly specifies a `cell` in HBase.
+A _\{row, column, version\}_ tuple exactly specifies a `cell` in HBase.
Cell content is uninterpreted bytes
-== Data Model Operations
+## Data Model Operations
The four primary data model operations are Get, Put, Scan, and Delete.
-Operations are applied via link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html[Table] instances.
+Operations are applied via [Table](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html) instances.
-=== Get
+### Get
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Get.html[Get] returns attributes for a specified row.
-Gets are executed via link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#get(org.apache.hadoop.hbase.client.Get)[Table.get]
+[Get](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Get.html) returns attributes for a specified row.
+Gets are executed via [Table.get](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#get%28org.apache.hadoop.hbase.client.Get%29).
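+
+A minimal sketch (assuming `table` is an open `Table` handle for the `webtable` example):
+
+```java
+Get get = new Get(Bytes.toBytes("com.cnn.www"));
+get.addFamily(Bytes.toBytes("contents")); // optionally narrow to one column family
+Result result = table.get(get);
+byte[] html = result.getValue(Bytes.toBytes("contents"), Bytes.toBytes("html"));
+```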
-=== Put
+### Put
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Put.html[Put] either adds new rows to a table (if the key is new) or can update existing rows (if the key already exists). Puts are executed via link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#put(org.apache.hadoop.hbase.client.Put)[Table.put] (non-writeBuffer) or link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#batch(java.util.List,java.lang.Object%5B%5D)[Table.batch] (non-writeBuffer)
+[Put](https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Put.html) either adds new rows to a table (if the key is new) or can update existing rows (if the key already exists). Puts are executed via [Table.put](