docs(new docs site): removes old sphinx doc site with gatsby (#10784)
* Remove old apache sphinx doc site with refreshed gatsby * remove rando * add licenses * excluing .mdx files from license checks * fixes * Clean up sphinx references * Addressed comments * colors and tweaks * more fixes * add dummy docs/requirements.txt to satisfy stupid fossa * cp .prettierrc * more licenses * minor touchups * reqs * removing old videos * add github buttons * prettier * tweaks * Eugenia/Fix some of the images and modified some of database pages * add subheadernav * add side headers nav and more responsive design for docs * update resources page and other styling updates * linting * tweaks * removing windows and align:center * update resources * remove links and and card hearder * lots of styling tweaks * Tweaks and minor improvements * lint * fix CI * trigger pre-comimt * rererefix CI Co-authored-by: Maxime Beauchemin <maximebeauchemin@gmail.com> Co-authored-by: Eugenia Moreno <eugenia@Eugenias-MBP.fios-router.home> Co-authored-by: Evan Rusackas <evan@preset.io>
158
docs/src/components/footer.tsx
Normal file
@@ -0,0 +1,158 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { Layout } from 'antd';
|
||||
import { css } from '@emotion/core';
|
||||
import { GithubOutlined, SlackSquareOutlined } from '@ant-design/icons';
|
||||
|
||||
// Ant Design's Layout.Footer, styled below with emotion css.
const { Footer } = Layout;

// Dark footer bar: centered, light-gray text; Apache boilerplate links in white.
const footerStyle = css`
  background-color: #323232;
  text-align: center;
  color: #ccc;
  .apacheLinks {
    a {
      color: white;
      margin: 5px;
    }
  }
`;

// Small, semi-transparent copyright + incubation-disclaimer text.
const copyrightStyle = css`
  font-size: 11px;
  color: rgba(255, 255, 255, 0.5);
`;

const apacheLinksStyle = css`
  text-align: center;
`;

// Row of community icons (Slack, GitHub, StackOverflow).
// `svg` targets the inline antd icons; `.svg` adjusts the StackOverflow
// <img> element, which is a plain image rather than an antd icon.
const iconContainerStyle = css`
  padding: 30px;
  background-color: #323232;
  display: flex;
  flex-direction: row;
  .icons {
    text-align: center;
    width: 100%;
    svg {
      margin-top: 15px;
      color: #ccc;
      width: 30px;
      height: 30px;
      margin: 0 15px;
    }
    .svg {
      width: 30px;
      height: 30px;
      margin-top: -15px;
      margin-left: 10px;
    }
  }
`;
|
||||
|
||||
const LayoutFooter = () => (
|
||||
<>
|
||||
<Footer css={footerStyle}>
|
||||
<div css={apacheLinksStyle} className="apacheLinks">
|
||||
<a
|
||||
href="https://www.apache.org/security/"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
Security |
|
||||
</a>
|
||||
<a
|
||||
href="https://www.apache.org/foundation/sponsorship.html"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
Donate |
|
||||
</a>
|
||||
|
||||
<a
|
||||
href="https://www.apache.org/foundation/thanks.html"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
Thanks
|
||||
</a>
|
||||
</div>
|
||||
<div css={iconContainerStyle}>
|
||||
<div className="icons">
|
||||
<a
|
||||
href="https://apache-superset.slack.com/join/shared_invite/zt-g8lpruog-HeqpgYrwdfrD5OYhlU7hPQ#/"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
<SlackSquareOutlined className="icon" />
|
||||
</a>
|
||||
<a
|
||||
href="https://github.com/apache/incubator-superset"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
<GithubOutlined className="icon" />
|
||||
</a>
|
||||
<a
|
||||
href="https://stackoverflow.com/questions/tagged/apache-superset+superset"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
<img
|
||||
alt="StackOverflow"
|
||||
src="/images/so-icon.svg"
|
||||
className="icon svg"
|
||||
/>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
<div css={copyrightStyle}>
|
||||
© Copyright
|
||||
{' '}
|
||||
{new Date().getFullYear()}
|
||||
,
|
||||
<a href="http://www.apache.org/" target="_blank" rel="noreferrer">
|
||||
The Apache Software Fountation
|
||||
</a>
|
||||
, Licensed under the Apache
|
||||
<a
|
||||
href="https://www.apache.org/licenses/"
|
||||
target="_blank"
|
||||
rel="noreferrer"
|
||||
>
|
||||
License.
|
||||
</a>
|
||||
{' '}
|
||||
<br />
|
||||
<div>
|
||||
Disclaimer: Apache Superset is an effort undergoing incubation at The
|
||||
Apache Software Foundation (ASF), sponsored by the Apache Incubator.
|
||||
Incubation is required of all newly accepted projects until a further
|
||||
review indicates that the infrastructure, communications, and decision
|
||||
making process have stabilized in a manner consistent with other
|
||||
successful ASF projects. While incubation status is not necessarily a
|
||||
reflection of the completeness or stability of the code, it does
|
||||
indicate that the project has yet to be fully endorsed by the ASF.
|
||||
</div>
|
||||
</div>
|
||||
</Footer>
|
||||
</>
|
||||
);
|
||||
|
||||
export default LayoutFooter;
|
||||
111
docs/src/components/image.tsx
Normal file
@@ -0,0 +1,111 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { useStaticQuery, graphql } from 'gatsby';
|
||||
import Img from 'gatsby-image';
|
||||
|
||||
// Props for the shared <Image> helper.
// imageName: either one of the named query keys below (e.g. 'logoSm',
//   'docker') or, when type === 'db', the original filename of an image
//   under src/images (matched against allImageSharp's originalName).
// width/height: optional explicit CSS size; only applied when BOTH are set.
// otherProps: forwarded to gatsby-image's <Img> in the non-'db' branch.
interface Props {
  imageName?: string;
  type?: string;
  width?: string;
  height?: string;
  otherProps?: any;
}

/**
 * Renders a gatsby-image fixed-size image, looked up either by a named
 * query key or (type === 'db') by original filename from the catch-all
 * allImageSharp result set.
 */
const Image = ({
  imageName, type, width, height, ...otherProps
}: Props) => {
  // Static GraphQL query: a handful of named, specifically-sized images
  // plus a catch-all (getAllImages) of every processed image at height 70.
  // Gatsby extracts this query at build time, so it must stay a literal.
  const data = useStaticQuery(graphql`
    query {
      logoSm: file(relativePath: { eq: "src/images/s.png" }) {
        childImageSharp {
          fixed(height: 30) {
            ...GatsbyImageSharpFixed
          }
        }
      }

      logoLg: file(relativePath: { eq: "src/images/s.png" }) {
        childImageSharp {
          fixed(width: 150) {
            ...GatsbyImageSharpFixed
          }
        }
      }

      incubatorSm: file(relativePath: { eq: "src/images/incubator.png" }) {
        childImageSharp {
          fixed(width: 300) {
            ...GatsbyImageSharpFixed
          }
        }
      }

      stackoverflow: file(
        relativePath: { eq: "src/images/stack_overflow.png" }
      ) {
        childImageSharp {
          fixed(width: 60) {
            ...GatsbyImageSharpFixed
          }
        }
      }

      docker: file(relativePath: { eq: "src/images/docker.png" }) {
        childImageSharp {
          fixed(width: 100) {
            ...GatsbyImageSharpFixed
          }
        }
      }

      preset: file(relativePath: { eq: "src/images/preset.png" }) {
        childImageSharp {
          fixed(width: 100) {
            ...GatsbyImageSharpFixed
          }
        }
      }

      getAllImages: allImageSharp {
        edges {
          node {
            fixed(height: 70) {
              ...GatsbyImageSharpFixed
              originalName
            }
          }
        }
      }
    }
  `);

  // 'db' images (database logos) are matched by original filename.
  const filter = data.getAllImages.edges.filter(
    (n) => n.node.fixed.originalName === imageName,
  );
  const imgStyle = width && height ? { width, height } : {};

  // filter[0] may be undefined when no image matches; optional chaining
  // then passes undefined through to <Img fixed={...}>.
  return type === 'db' ? (
    <Img fixed={filter[0]?.node?.fixed} style={imgStyle} imgStyle={imgStyle} />
  ) : (
    <Img fixed={data[imageName]?.childImageSharp?.fixed} {...otherProps} />
  );
};

export default Image;
|
||||
123
docs/src/components/layout.scss
Normal file
@@ -0,0 +1,123 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
// Theme variables: Superset brand teal and the standard large spacing unit.
$brandColor: #20A7C9;
$bigPad: 60px;

html {
  font-family: sans-serif;
  -ms-text-size-adjust: 100%;
  -webkit-text-size-adjust: 100%;
}
body {
  background-color: transparent !important;
  margin: 0;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
  color: black;
}

.ant-layout {
  background-color: #fff;
}

// Generic content page: an 800px centered column of sections; the first
// section gets a gradient "hero" card treatment.
.contentPage {
  padding-bottom: $bigPad;
  section {
    width: 100%;
    max-width: 800px;
    margin: 0 auto;
    padding: $bigPad 0 0 0;
    font-size: 17px;
    &:first-of-type{
      padding: 40px;
      background-image: linear-gradient(120deg, lighten($brandColor, 45), lighten($brandColor, 15));
      border-radius: 10px;
    }
  }
  h1 {
    font-size: 48px;
  }
  h2 {
    font-size: 32px;
  }
  h3 {
    font-size: 24px;
  }
  .title{
    margin-top: $bigPad;
  }

  // Card grids (e.g. database list) lay out as a wrapping flex row.
  .ant-card-body{
    display: flex;
    align-items: stretch;
    flex-wrap: wrap;
    .ant-card-grid-hoverable {
      cursor: pointer;
    }
    h4 {
      margin-bottom: 20px;
    }
  }
}


a {
  color: #1985a0;
}
th, td {
  padding: 15px;
  text-align: left;
  border-bottom: 1px solid #ddd;
}

// Ant Design menu overrides: brand-colored selected/hover states.
.ant-menu:not(.ant-menu-horizontal) .ant-menu-item-selected {
  background-color: $brandColor;
}

.ant-menu-item-selected a {
  color: white;
}
.ant-menu-submenu-selected {
  color: $brandColor;
}

.ant-menu-horizontal {
  & > .ant-menu-item:hover, & > .ant-menu-item-active {
    color: $brandColor !important;
    border-bottom: 2px solid $brandColor;
  }
  & >.ant-menu-item-selected {
    color: $brandColor;
    border-bottom: 2px solid $brandColor;
    a {
      color: $brandColor;
    }
  }
}

// Zebra-stripe table rows.
tr:nth-child(even) {background-color: #f2f2f2;}

.centered {
  text-align: center;
}

button {
  background: $brandColor;
}
|
||||
235
docs/src/components/layout.tsx
Normal file
@@ -0,0 +1,235 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React, { useState } from 'react';
|
||||
import { Link } from 'gatsby';
|
||||
import {
|
||||
Layout, Menu, Button, Drawer,
|
||||
} from 'antd';
|
||||
import { css } from '@emotion/core';
|
||||
import { MenuOutlined } from '@ant-design/icons';
|
||||
|
||||
import logoSvg from '../images/superset-logo-horiz.svg';
|
||||
import Footer from './footer';
|
||||
import SEO from './seo';
|
||||
import AppMenu from './menu';
|
||||
|
||||
import { getCurrentPath } from '../utils';
|
||||
import 'antd/dist/antd.css';
|
||||
import './layout.scss';
|
||||
|
||||
const { Header, Sider } = Layout;
|
||||
|
||||
const leftPaneWidth = 350;
|
||||
const breakpoints = [576, 768, 992, 1200];
|
||||
|
||||
const mq = breakpoints.map((bp) => `@media (max-width: ${bp}px)`);
|
||||
|
||||
const layoutStyles = css`
|
||||
font-family: Inter;
|
||||
.ant-layout {
|
||||
background-color: white !important;
|
||||
}
|
||||
Button {
|
||||
background: #20a7c9;
|
||||
border-color: #20a7c9;
|
||||
border-radius: 4px;
|
||||
}
|
||||
`;
|
||||
|
||||
const headerStyle = css`
|
||||
background-color: #fff;
|
||||
position: fixed;
|
||||
top: 0;
|
||||
width: 100%;
|
||||
box-shadow: 0 2px 6px 0 rgba(0, 0, 0, 0.12);
|
||||
z-index: 1;
|
||||
.ant-menu {
|
||||
background: transparent;
|
||||
}
|
||||
.ant-menu-horizontal {
|
||||
border-bottom: none;
|
||||
}
|
||||
`;
|
||||
|
||||
const getStartedButtonStyle = css`
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 16px;
|
||||
`;
|
||||
|
||||
const centerLayoutStyle = css`
|
||||
padding: 25px;
|
||||
min-height: 60vw;
|
||||
overflow: auto;
|
||||
padding-right: 250px;
|
||||
.menu {
|
||||
display: none;
|
||||
${[mq[2]]} {
|
||||
display: block;
|
||||
}
|
||||
padding: 25px;
|
||||
}
|
||||
`;
|
||||
|
||||
const sidebarStyle = css`
|
||||
background-color: #fff;
|
||||
position: fixed;
|
||||
top: 64px;
|
||||
bottom: 0px;
|
||||
left: 0px;
|
||||
border-right: 1px solid #bfbfbf;
|
||||
`;
|
||||
|
||||
const contentStyle = css`
|
||||
margin-top: 3px;
|
||||
background-color: white;
|
||||
h2 {
|
||||
font-size: 30px;
|
||||
font-weight: bold;
|
||||
}
|
||||
h3 {
|
||||
font-size: 20px;
|
||||
font-weight: bold;
|
||||
}
|
||||
img {
|
||||
max-width: 800px;
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
blockquote {
|
||||
color: rgb(132, 146, 166);
|
||||
padding: 10px 30px;
|
||||
margin: 30px 0px;
|
||||
border-radius: 3px;
|
||||
border-left: 4px solid rgb(56, 211, 236);
|
||||
background: rgb(239, 242, 247);
|
||||
}
|
||||
pre {
|
||||
border: solid #00000033 1px;
|
||||
padding: 5px;
|
||||
background-color: #82ef8217;
|
||||
border-radius: 3px;
|
||||
max-width: 1000px;
|
||||
}
|
||||
p {
|
||||
font-size: 16px;
|
||||
}
|
||||
ul {
|
||||
font-size: 16px;
|
||||
}
|
||||
`;
|
||||
|
||||
const contentLayoutDocsStyle = css`
|
||||
position: fixed;
|
||||
top: 64px;
|
||||
left: ${leftPaneWidth}px;
|
||||
right: 0px;
|
||||
bottom: 0px;
|
||||
overflow: visible;
|
||||
${[mq[2]]} {
|
||||
top: 64px;
|
||||
left: 0;
|
||||
}
|
||||
aside {
|
||||
${[mq[2]]} {
|
||||
display: none;
|
||||
}
|
||||
overflow: auto;
|
||||
}
|
||||
`;
|
||||
|
||||
const logoStyle = css`
|
||||
float: left;
|
||||
margin-left: -50px;
|
||||
margin-top: 5px;
|
||||
heigh: 30px;
|
||||
`;
|
||||
interface Props {
|
||||
children: React.ReactNode;
|
||||
}
|
||||
|
||||
const AppLayout = ({ children }: Props) => {
|
||||
const [showDrawer, setDrawer] = useState(false);
|
||||
const isOnDocsPage = getCurrentPath().indexOf('docs') > -1;
|
||||
return (
|
||||
<Layout css={layoutStyles}>
|
||||
<SEO title="Welcome" />
|
||||
<Header css={headerStyle}>
|
||||
<Link to="/">
|
||||
<img height="50" css={logoStyle} src={logoSvg} alt="logo" />
|
||||
</Link>
|
||||
<Menu mode="horizontal" selectedKeys={getCurrentPath()}>
|
||||
<Menu.Item key="docsintro">
|
||||
<Link to="/docs/intro">Documentation</Link>
|
||||
</Menu.Item>
|
||||
<Menu.Item key="community">
|
||||
<Link to="/community">Community</Link>
|
||||
</Menu.Item>
|
||||
<Menu.Item key="resources">
|
||||
<Link to="/resources"> Resources</Link>
|
||||
</Menu.Item>
|
||||
</Menu>
|
||||
<div css={getStartedButtonStyle}>
|
||||
<Link to="/docs/intro">
|
||||
<Button type="primary" size="medium">
|
||||
Get Started
|
||||
</Button>
|
||||
</Link>
|
||||
</div>
|
||||
</Header>
|
||||
{isOnDocsPage ? (
|
||||
<>
|
||||
<Drawer
|
||||
placement="left"
|
||||
closable={false}
|
||||
onClose={() => setDrawer(false)}
|
||||
visible={showDrawer}
|
||||
getContainer={false}
|
||||
style={{ position: 'absolute' }}
|
||||
>
|
||||
<AppMenu />
|
||||
</Drawer>
|
||||
<Layout css={contentLayoutDocsStyle}>
|
||||
{isOnDocsPage && (
|
||||
<Sider width={leftPaneWidth} css={sidebarStyle}>
|
||||
<AppMenu />
|
||||
</Sider>
|
||||
)}
|
||||
<Layout css={contentStyle}>
|
||||
<div css={centerLayoutStyle}>
|
||||
<MenuOutlined
|
||||
onClick={() => setDrawer(true)}
|
||||
className="menu"
|
||||
/>
|
||||
{children}
|
||||
</div>
|
||||
<Footer />
|
||||
</Layout>
|
||||
</Layout>
|
||||
</>
|
||||
) : (
|
||||
<Layout>
|
||||
{children}
|
||||
<Footer />
|
||||
</Layout>
|
||||
)}
|
||||
</Layout>
|
||||
);
|
||||
};
|
||||
|
||||
export default AppLayout;
|
||||
57
docs/src/components/menu.tsx
Normal file
@@ -0,0 +1,57 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { useMenus, Link } from 'docz';
|
||||
import { Menu } from 'antd';
|
||||
import { getActiveMenuItem } from '../utils';
|
||||
|
||||
const { SubMenu } = Menu;
|
||||
|
||||
export default () => {
|
||||
const menus = useMenus();
|
||||
const { openKey, selectedKey } = getActiveMenuItem(menus);
|
||||
return (
|
||||
<Menu
|
||||
mode="inline"
|
||||
defaultOpenKeys={[openKey]}
|
||||
defaultSelectedKeys={[selectedKey]}
|
||||
>
|
||||
{menus.map((menuItem) => {
|
||||
if (menuItem.menu?.length > 0) {
|
||||
return (
|
||||
<SubMenu key={menuItem.id} title={menuItem.name}>
|
||||
{menuItem.menu
|
||||
.sort((a, b) => a.index - b.index)
|
||||
.map((submenuItem) => (
|
||||
<Menu.Item key={submenuItem.id}>
|
||||
<Link to={submenuItem.route}>{submenuItem.name}</Link>
|
||||
</Menu.Item>
|
||||
))}
|
||||
</SubMenu>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<Menu.Item key={menuItem.id}>
|
||||
<Link to={menuItem.route}>{menuItem.name}</Link>
|
||||
</Menu.Item>
|
||||
);
|
||||
})}
|
||||
</Menu>
|
||||
);
|
||||
};
|
||||
42
docs/src/components/next.tsx
Normal file
@@ -0,0 +1,42 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { useMenus } from 'docz';
|
||||
import { getPreviousAndNextUrls } from '../utils';
|
||||
|
||||
const nextButtons = () => {
|
||||
const menus = useMenus();
|
||||
const [prevUrl, nextUrl] = getPreviousAndNextUrls(menus);
|
||||
return (
|
||||
<>
|
||||
{prevUrl && (
|
||||
<a href={prevUrl} className="ant-btn">
|
||||
Prev
|
||||
</a>
|
||||
)}
|
||||
{nextUrl && (
|
||||
<a href={nextUrl} className="ant-btn">
|
||||
Next
|
||||
</a>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
export default nextButtons;
|
||||
47
docs/src/components/select.tsx
Normal file
@@ -0,0 +1,47 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { Select } from 'antd';
|
||||
import querystring from 'querystring';
|
||||
|
||||
const { Option } = Select;
|
||||
|
||||
const versions = ['1', '2'];
|
||||
|
||||
export default function VersionSelect() {
|
||||
const { version } = querystring.parse(window.location.search.substr(1));
|
||||
const handleChange = (e) => {
|
||||
// @ts-ignore
|
||||
window.location = `/docs/intro?version=${e}`;
|
||||
};
|
||||
return (
|
||||
<div>
|
||||
version:
|
||||
<Select
|
||||
defaultValue={version || 1}
|
||||
style={{ width: 120 }}
|
||||
onChange={handleChange}
|
||||
>
|
||||
{versions.map((e) => (
|
||||
<Option value={e}>{e}</Option>
|
||||
))}
|
||||
</Select>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
104
docs/src/components/seo.js
Normal file
@@ -0,0 +1,104 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import PropTypes from 'prop-types';
|
||||
import { Helmet } from 'react-helmet';
|
||||
import { useStaticQuery, graphql } from 'gatsby';
|
||||
import favicon from '../images/favicon.png';
|
||||
|
||||
/**
 * Document-head manager: sets the page title (templated with the site
 * title), description, Open Graph and Twitter meta tags, and the favicon,
 * via react-helmet. Site-level defaults come from gatsby's siteMetadata.
 */
function SEO({
  description, lang, meta, title,
}) {
  // Static query for site-wide metadata; gatsby extracts this at build
  // time, so it must remain a literal template.
  const { site } = useStaticQuery(
    graphql`
      query {
        site {
          siteMetadata {
            title
            description
            author
          }
        }
      }
    `,
  );

  // Per-page description falls back to the site-wide one.
  const metaDescription = description || site.siteMetadata.description;

  return (
    <Helmet
      htmlAttributes={{
        lang,
      }}
      title={title}
      titleTemplate={`%s | ${site.siteMetadata.title}`}
      meta={[
        {
          name: 'description',
          content: metaDescription,
        },
        {
          property: 'og:title',
          content: title,
        },
        {
          property: 'og:description',
          content: metaDescription,
        },
        {
          property: 'og:type',
          content: 'website',
        },
        {
          name: 'twitter:card',
          content: 'summary',
        },
        {
          name: 'twitter:creator',
          content: site.siteMetadata.author,
        },
        {
          name: 'twitter:title',
          content: title,
        },
        {
          name: 'twitter:description',
          content: metaDescription,
        },
        // Caller-supplied meta entries are appended after the defaults.
      ].concat(meta)}
    >
      <link rel="icon" href={favicon} />
    </Helmet>
  );
}

SEO.defaultProps = {
  lang: 'en',
  meta: [],
  description: '',
};

SEO.propTypes = {
  description: PropTypes.string,
  lang: PropTypes.string,
  meta: PropTypes.arrayOf(PropTypes.object),
  title: PropTypes.string.isRequired,
};

export default SEO;
|
||||
35
docs/src/components/sidenav.tsx
Normal file
@@ -0,0 +1,35 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { Anchor } from 'antd';
|
||||
import { useMenus } from 'docz';
|
||||
import { getActiveMenuItem } from '../utils';
|
||||
|
||||
const { Link } = Anchor;
|
||||
|
||||
/**
 * In-page table of contents: renders an antd Anchor link for each heading
 * of the currently-active doc page (from the docz menu tree).
 */
const HeaderNav = () => {
  const menus = useMenus();
  const { headings } = getActiveMenuItem(menus);
  // key fixes React's "missing key" warning for list rendering
  const headsList = headings.map((e) => (
    <Link key={e.slug} href={`#${e.slug}`} title={e.value} />
  ));
  return <Anchor>{headsList}</Anchor>;
};

export default HeaderNav;
|
||||
77
docs/src/gatsby-theme-docz/index.tsx
Normal file
@@ -0,0 +1,77 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { theme, useConfig } from 'docz';
|
||||
import { ThemeProvider } from 'theme-ui';
|
||||
import { css } from '@emotion/core';
|
||||
import SEO from '../components/seo';
|
||||
import Layout from '../components/layout';
|
||||
import HeaderNav from '../components/sidenav';
|
||||
import NextLinks from '../components/next';
|
||||
|
||||
import 'antd/dist/antd.css';
|
||||
|
||||
interface Props {
  children: React.ReactNode;
}

// Doc-page layout: content on the left, a fixed table-of-contents panel
// (.headerNav) pinned to the right edge below the 64px header.
const docLayout = css`
  display: flex;
  flex-direction: row;
  .headerNav {
    position: fixed;
    top: 64px;
    right: 0;
    width: 250px;
    padding: 16px;
    height: 605px;
    overflow: auto;
    ul {
      font-size: 12px;
      li {
        height: 25px;
        line-height: 25px;
        word-wrap: break-word;
      }
    }
  }
`;

/**
 * docz theme entry point: wraps every doc page in the site layout, SEO
 * tags, the right-hand heading nav, and Prev/Next pagination links.
 * The docz config is passed straight through as the theme-ui theme.
 */
const Theme = ({ children }: Props) => {
  const config = useConfig();
  return (
    <ThemeProvider theme={config}>
      <Layout>
        <SEO title="Documents" />
        <div css={docLayout}>
          <div>{children}</div>
          <div className="headerNav">
            <HeaderNav />
          </div>
        </div>
        <div>
          <NextLinks />
        </div>
      </Layout>
    </ThemeProvider>
  );
};

// @ts-ignore
export default theme()(Theme);
|
||||
BIN
docs/src/images/apache-drill.png
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
docs/src/images/apache-druid.jpeg
Normal file
|
After Width: | Height: | Size: 210 KiB |
BIN
docs/src/images/apache-druid.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
69
docs/src/images/apache-hive.svg
Normal file
BIN
docs/src/images/apache-impala.png
Normal file
|
After Width: | Height: | Size: 5.1 KiB |
BIN
docs/src/images/apache-kylin.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
docs/src/images/apacheSupersetHoriz.png
Normal file
|
After Width: | Height: | Size: 119 KiB |
BIN
docs/src/images/aws-redshift.png
Normal file
|
After Width: | Height: | Size: 9.0 KiB |
BIN
docs/src/images/clickhouse.png
Normal file
|
After Width: | Height: | Size: 7.5 KiB |
BIN
docs/src/images/dashboard.png
Normal file
|
After Width: | Height: | Size: 442 KiB |
BIN
docs/src/images/docker.png
Normal file
|
After Width: | Height: | Size: 24 KiB |
BIN
docs/src/images/druid.png
Normal file
|
After Width: | Height: | Size: 119 KiB |
BIN
docs/src/images/exasol.png
Normal file
|
After Width: | Height: | Size: 8.4 KiB |
BIN
docs/src/images/explorer.png
Normal file
|
After Width: | Height: | Size: 626 KiB |
BIN
docs/src/images/favicon.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
docs/src/images/firebird.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
docs/src/images/gatsby-astronaut.png
Normal file
|
After Width: | Height: | Size: 163 KiB |
BIN
docs/src/images/gatsby-icon.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
docs/src/images/googleBQ.png
Normal file
|
After Width: | Height: | Size: 16 KiB |
BIN
docs/src/images/greenplum.jpeg
Normal file
|
After Width: | Height: | Size: 7.4 KiB |
BIN
docs/src/images/greenplum.png
Normal file
|
After Width: | Height: | Size: 17 KiB |
BIN
docs/src/images/ibmdb2.png
Normal file
|
After Width: | Height: | Size: 14 KiB |
BIN
docs/src/images/incubator.png
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
docs/src/images/monet.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
docs/src/images/msql.png
Normal file
|
After Width: | Height: | Size: 22 KiB |
1062
docs/src/images/mysql.html
Normal file
BIN
docs/src/images/mysql.png
Normal file
|
After Width: | Height: | Size: 14 KiB |
BIN
docs/src/images/oracle-logo.png
Normal file
|
After Width: | Height: | Size: 10 KiB |
BIN
docs/src/images/oracle.png
Normal file
|
After Width: | Height: | Size: 8.0 KiB |
BIN
docs/src/images/oraclelogo.png
Normal file
|
After Width: | Height: | Size: 29 KiB |
BIN
docs/src/images/postgresql.jpg
Normal file
|
After Width: | Height: | Size: 19 KiB |
BIN
docs/src/images/postsql.png
Normal file
|
After Width: | Height: | Size: 43 KiB |
BIN
docs/src/images/preset.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
33
docs/src/images/preset.svg
Normal file
BIN
docs/src/images/presto-og.png
Normal file
|
After Width: | Height: | Size: 18 KiB |
BIN
docs/src/images/s.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
docs/src/images/snowflake.png
Normal file
|
After Width: | Height: | Size: 21 KiB |
BIN
docs/src/images/sqllite.jpg
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
docs/src/images/sqllite.png
Normal file
|
After Width: | Height: | Size: 37 KiB |
BIN
docs/src/images/stack_overflow.png
Normal file
|
After Width: | Height: | Size: 29 KiB |
52
docs/src/images/superset-logo-horiz-apache.svg
Normal file
@@ -0,0 +1,52 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg width="100%" height="100%" viewBox="0 0 266 69" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
|
||||
<path d="M73.79,15.23C67.32,15.23 61.36,18.87 55.6,25.23C49.94,18.77 43.88,15.23 37.11,15.23C25.9,15.23 17.72,23.23 17.72,34C17.72,44.77 25.9,52.67 37.11,52.67C44,52.67 49.34,49.44 55.3,43C61.06,49.46 66.92,52.69 73.79,52.69C85,52.67 93.18,44.8 93.18,34C93.18,23.2 85,15.23 73.79,15.23ZM37.19,41.37C32.44,41.37 29.61,38.24 29.61,34.1C29.61,29.96 32.44,26.74 37.19,26.74C41.19,26.74 44.46,29.96 48,34.3C44.66,38.34 41.13,41.37 37.19,41.37ZM73.45,41.37C69.51,41.37 66.18,38.24 62.64,34.1C66.28,29.76 69.41,26.74 73.45,26.74C78.2,26.74 81,30 81,34.1C81,38.2 78.2,41.37 73.45,41.37Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M63.74,50L71.28,41C68.28,40.1 65.51,37.4 62.64,34.05L55.3,43C57.703,45.788 60.556,48.154 63.74,50Z" style="fill:rgb(32,167,201);fill-rule:nonzero;"/>
|
||||
<g id="Main">
|
||||
<g id="Superset">
|
||||
<g id="Full-Lockup-With-Text">
|
||||
<g id="Group-7">
|
||||
<g id="Group-17">
|
||||
<g id="Superset-Copy">
|
||||
<g>
|
||||
<path d="M116.72,40.39C116.751,39.474 116.36,38.592 115.66,38C114.539,37.193 113.272,36.609 111.93,36.28C109.421,35.66 107.048,34.582 104.93,33.1C103.37,31.922 102.481,30.053 102.55,28.1C102.528,26.015 103.555,24.052 105.28,22.88C107.327,21.458 109.79,20.754 112.28,20.88C114.812,20.767 117.301,21.577 119.28,23.16C120.994,24.509 121.961,26.601 121.88,28.78L121.88,28.88L116.82,28.88C116.861,27.778 116.419,26.71 115.61,25.96C114.667,25.171 113.457,24.773 112.23,24.85C111.077,24.779 109.934,25.104 108.99,25.77C108.263,26.344 107.842,27.224 107.85,28.15C107.867,28.99 108.298,29.769 109,30.23C110.313,31.008 111.726,31.603 113.2,32C115.582,32.553 117.81,33.633 119.72,35.16C121.197,36.462 122.013,38.362 121.94,40.33C122.008,42.418 121.013,44.404 119.3,45.6C117.238,46.985 114.78,47.662 112.3,47.53C109.663,47.589 107.072,46.823 104.89,45.34C102.838,43.996 101.66,41.648 101.81,39.2L101.81,39.09L107,39.09C106.889,40.389 107.42,41.664 108.42,42.5C109.597,43.291 111.004,43.671 112.42,43.58C113.571,43.658 114.716,43.348 115.67,42.7C116.371,42.144 116.762,41.283 116.72,40.39Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M137,44.4C136.453,45.359 135.672,46.164 134.73,46.74C132.116,48.188 128.835,47.72 126.73,45.6C125.583,44.267 125.01,42.24 125.01,39.52L125.01,27.85L130.21,27.85L130.21,39.58C130.131,40.629 130.379,41.678 130.92,42.58C131.434,43.208 132.22,43.551 133.03,43.5C133.767,43.516 134.498,43.38 135.18,43.1C135.764,42.836 136.268,42.422 136.64,41.9L136.64,27.85L141.86,27.85L141.86,47.18L137.41,47.18L137,44.4Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M162.87,38.05C162.99,40.508 162.286,42.937 160.87,44.95C159.569,46.68 157.492,47.658 155.33,47.56C154.4,47.575 153.478,47.384 152.63,47C151.843,46.61 151.158,46.042 150.63,45.34L150.63,54.62L145.43,54.62L145.43,27.85L150.13,27.85L150.44,30.13C150.968,29.331 151.673,28.664 152.5,28.18C153.363,27.707 154.336,27.469 155.32,27.49C157.535,27.403 159.644,28.467 160.89,30.3C162.313,32.49 163.013,35.072 162.89,37.68L162.87,38.05ZM157.65,37.65C157.71,36.118 157.397,34.595 156.74,33.21C156.228,32.144 155.132,31.476 153.95,31.51C153.253,31.49 152.562,31.656 151.95,31.99C151.393,32.322 150.937,32.799 150.63,33.37L150.63,41.86C150.942,42.394 151.4,42.828 151.95,43.11C152.573,43.411 153.259,43.558 153.95,43.54C155.082,43.61 156.161,43.032 156.73,42.05C157.376,40.819 157.684,39.439 157.62,38.05L157.65,37.65Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M174.21,47.56C171.699,47.674 169.258,46.696 167.52,44.88C165.828,43.026 164.93,40.579 165.02,38.07L165.02,37.36C164.918,34.784 165.761,32.258 167.39,30.26C170.696,26.757 176.29,26.572 179.82,29.85C181.338,31.617 182.119,33.903 182,36.23L182,39.07L170.43,39.07L170.43,39.18C170.48,40.34 170.933,41.447 171.71,42.31C172.51,43.146 173.634,43.595 174.79,43.54C175.762,43.562 176.732,43.444 177.67,43.19C178.539,42.91 179.377,42.542 180.17,42.09L181.58,45.32C180.656,46.037 179.609,46.579 178.49,46.92C177.108,47.366 175.662,47.582 174.21,47.56ZM173.74,31.56C172.841,31.53 171.983,31.946 171.45,32.67C170.859,33.531 170.513,34.537 170.45,35.58L170.5,35.67L176.9,35.67L176.9,35.21C176.949,34.261 176.674,33.322 176.12,32.55C175.546,31.835 174.655,31.446 173.74,31.51L173.74,31.56Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M195.3,32.33L193.38,32.33C192.711,32.303 192.047,32.47 191.47,32.81C190.964,33.141 190.567,33.614 190.33,34.17L190.33,47.18L185.13,47.18L185.13,27.85L190,27.85L190.23,30.71C190.616,29.787 191.224,28.972 192,28.34C192.71,27.776 193.594,27.476 194.5,27.49C194.741,27.488 194.982,27.508 195.22,27.55L195.89,27.7L195.3,32.33Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M208.32,41.86C208.308,41.257 207.996,40.698 207.49,40.37C206.544,39.809 205.498,39.435 204.41,39.27C202.553,38.979 200.785,38.271 199.24,37.2C198.087,36.32 197.433,34.93 197.49,33.48C197.487,31.814 198.265,30.24 199.59,29.23C201.198,28.003 203.19,27.386 205.21,27.49C207.312,27.38 209.391,27.991 211.1,29.22C212.489,30.234 213.279,31.882 213.2,33.6L213.2,33.71L208.2,33.71C208.226,33.002 207.958,32.314 207.46,31.81C206.859,31.287 206.074,31.024 205.28,31.08C204.561,31.04 203.85,31.26 203.28,31.7C202.816,32.075 202.55,32.644 202.56,33.24C202.551,33.826 202.837,34.379 203.32,34.71C204.271,35.243 205.318,35.582 206.4,35.71C208.308,35.991 210.126,36.71 211.71,37.81C212.862,38.729 213.506,40.148 213.44,41.62C213.458,43.325 212.62,44.93 211.21,45.89C209.473,47.062 207.403,47.641 205.31,47.54C203.1,47.652 200.925,46.939 199.21,45.54C197.817,44.508 196.996,42.873 197,41.14L197,41.04L201.77,41.04C201.72,41.907 202.093,42.746 202.77,43.29C203.515,43.784 204.397,44.029 205.29,43.99C206.067,44.039 206.838,43.835 207.49,43.41C208.012,43.069 208.326,42.484 208.32,41.86Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M224.86,47.56C222.352,47.674 219.914,46.696 218.18,44.88C216.488,43.026 215.59,40.579 215.68,38.07L215.68,37.36C215.579,34.786 216.419,32.261 218.04,30.26C221.346,26.757 226.94,26.572 230.47,29.85C231.992,31.615 232.77,33.903 232.64,36.23L232.64,39.07L221.09,39.07L221.09,39.18C221.137,40.339 221.587,41.446 222.36,42.31C223.162,43.149 224.291,43.598 225.45,43.54C226.419,43.562 227.385,43.444 228.32,43.19C229.193,42.912 230.034,42.544 230.83,42.09L232.24,45.32C231.315,46.035 230.268,46.577 229.15,46.92C227.765,47.366 226.315,47.582 224.86,47.56ZM224.4,31.56C223.5,31.526 222.641,31.943 222.11,32.67C221.519,33.532 221.174,34.537 221.11,35.58L221.17,35.67L227.57,35.67L227.57,35.21C227.619,34.261 227.344,33.322 226.79,32.55C226.214,31.832 225.318,31.442 224.4,31.51L224.4,31.56Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M242.35,23.11L242.35,27.85L245.61,27.85L245.61,31.51L242.35,31.51L242.35,41.36C242.296,41.937 242.465,42.513 242.82,42.97C243.15,43.299 243.604,43.474 244.07,43.45C244.304,43.451 244.538,43.435 244.77,43.4C245.003,43.363 245.233,43.313 245.46,43.25L245.91,47.02C245.408,47.195 244.893,47.332 244.37,47.43C243.834,47.516 243.293,47.56 242.75,47.56C241.219,47.662 239.712,47.126 238.59,46.08C237.508,44.765 236.984,43.077 237.13,41.38L237.13,31.51L234.31,31.51L234.31,27.85L237.13,27.85L237.13,23.11L242.35,23.11Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<path d="M55.6,25.22C53.213,22.392 50.378,19.973 47.21,18.06L39.66,27.16C42.53,28.16 45.07,30.74 47.77,34.03L48.07,34.24L55.6,25.22Z" style="fill:rgb(32,167,201);fill-rule:nonzero;"/>
|
||||
<path d="M130.22,21.43L127.44,21.43L126.9,23L125.3,23L128,15.7L129.63,15.7L132.35,23L130.74,23L130.22,21.43ZM127.83,20.28L129.83,20.28L128.83,17.41L127.83,20.28Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M138.29,20.44C138.327,21.136 138.11,21.822 137.68,22.37C137.051,23.065 136.051,23.297 135.18,22.95C134.939,22.844 134.727,22.683 134.56,22.48L134.56,25.08L133,25.08L133,17.58L134.43,17.58L134.52,18.22C134.682,17.988 134.899,17.8 135.15,17.67C135.416,17.536 135.712,17.471 136.01,17.48C136.667,17.446 137.3,17.738 137.7,18.26C138.13,18.862 138.345,19.591 138.31,20.33L138.29,20.44ZM136.7,20.33C136.721,19.896 136.624,19.464 136.42,19.08C136.254,18.769 135.922,18.582 135.57,18.6C135.358,18.595 135.149,18.644 134.96,18.74C134.792,18.829 134.654,18.965 134.56,19.13L134.56,21.51C134.659,21.661 134.798,21.782 134.96,21.86C135.155,21.945 135.367,21.986 135.58,21.98C135.918,22.006 136.245,21.844 136.43,21.56C136.627,21.217 136.721,20.825 136.7,20.43L136.7,20.33Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M142.72,23C142.667,22.88 142.617,22.75 142.57,22.61C142.529,22.473 142.502,22.332 142.49,22.19C142.323,22.452 142.102,22.673 141.84,22.84C141.544,23.034 141.194,23.129 140.84,23.11C140.326,23.143 139.817,22.986 139.41,22.67C139.07,22.367 138.886,21.925 138.91,21.47C138.893,20.972 139.146,20.501 139.57,20.24C140.164,19.908 140.841,19.755 141.52,19.8L142.41,19.8L142.41,19.37C142.421,19.146 142.342,18.926 142.19,18.76C142.006,18.605 141.77,18.526 141.53,18.54C141.315,18.526 141.102,18.59 140.93,18.72C140.786,18.836 140.708,19.016 140.72,19.2L139.19,19.2C139.175,18.718 139.424,18.265 139.84,18.02C140.371,17.666 141.002,17.491 141.64,17.52C142.248,17.491 142.849,17.663 143.35,18.01C143.79,18.341 144.038,18.87 144.01,19.42L144.01,21.66C144.007,21.899 144.031,22.137 144.08,22.37C144.119,22.6 144.182,22.824 144.27,23.04L142.72,23ZM141.27,22C141.533,22.006 141.792,21.941 142.02,21.81C142.21,21.714 142.364,21.56 142.46,21.37L142.46,20.6L141.57,20.6C141.292,20.581 141.017,20.666 140.8,20.84C140.635,20.979 140.54,21.184 140.54,21.4C140.537,21.569 140.61,21.731 140.74,21.84C140.892,21.955 141.08,22.011 141.27,22Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M147.73,22C147.977,22.01 148.22,21.928 148.41,21.77C148.579,21.618 148.671,21.397 148.66,21.17L150.11,21.17C150.127,21.709 149.876,22.223 149.44,22.54C148.949,22.921 148.341,23.119 147.72,23.1C146.975,23.139 146.247,22.859 145.72,22.33C145.223,21.786 144.963,21.066 145,20.33L145,20.18C144.965,19.446 145.22,18.727 145.71,18.18C146.761,17.225 148.354,17.141 149.5,17.98C149.939,18.35 150.177,18.907 150.14,19.48L148.69,19.48C148.7,19.223 148.61,18.972 148.44,18.78C148.263,18.589 148.01,18.487 147.75,18.5C147.39,18.467 147.041,18.647 146.86,18.96C146.674,19.317 146.584,19.717 146.6,20.12L146.6,20.27C146.583,20.673 146.673,21.073 146.86,21.43C147.02,21.767 147.357,21.988 147.73,22Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M152.58,18.26C152.958,17.76 153.549,17.465 154.176,17.465C154.701,17.465 155.206,17.672 155.58,18.04C155.973,18.53 156.163,19.154 156.11,19.78L156.11,23L154.52,23L154.52,19.77C154.553,19.451 154.468,19.13 154.28,18.87C154.092,18.678 153.828,18.579 153.56,18.6C153.361,18.598 153.164,18.635 152.98,18.71C152.82,18.778 152.679,18.885 152.57,19.02L152.57,23.02L151,23.02L151,15.18L152.59,15.18L152.58,18.26Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M159.92,23.11C159.179,23.132 158.459,22.859 157.92,22.35C157.41,21.848 157.134,21.155 157.16,20.44L157.16,20.24C157.131,19.505 157.389,18.788 157.88,18.24C158.377,17.71 159.085,17.428 159.81,17.47C160.491,17.427 161.16,17.666 161.66,18.13C162.116,18.611 162.354,19.258 162.32,19.92L162.32,20.71L158.81,20.71C158.82,21.041 158.956,21.356 159.19,21.59C159.443,21.83 159.782,21.956 160.13,21.94C160.426,21.944 160.722,21.91 161.01,21.84C161.272,21.76 161.527,21.656 161.77,21.53L162.2,22.44C161.915,22.644 161.597,22.796 161.26,22.89C160.827,23.032 160.375,23.107 159.92,23.11ZM159.78,18.6C159.513,18.592 159.257,18.711 159.09,18.92C158.907,19.153 158.795,19.435 158.77,19.73L160.77,19.73L160.77,19.6C160.783,19.329 160.697,19.063 160.53,18.85C160.331,18.658 160.055,18.566 159.78,18.6Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M248.77,23.62L247.92,23.62L247.92,26.28L247.37,26.28L247.37,23.62L246.52,23.62L246.52,23.14L248.77,23.14L248.77,23.62ZM251.93,24.27L251.05,26.27L250.75,26.27L249.84,24.17L249.84,26.26L249.3,26.26L249.3,23.14L249.98,23.14L250.92,25.42L251.92,23.14L252.57,23.14L252.57,26.28L252.02,26.28L251.93,24.27Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 13 KiB |
45
docs/src/images/superset-logo-horiz.svg
Normal file
@@ -0,0 +1,45 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg width="100%" height="100%" viewBox="0 0 266 69" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
|
||||
<path d="M73.79,15.23C67.32,15.23 61.36,18.87 55.6,25.23C49.94,18.77 43.88,15.23 37.11,15.23C25.9,15.23 17.72,23.23 17.72,34C17.72,44.77 25.9,52.67 37.11,52.67C44,52.67 49.34,49.44 55.3,43C61.06,49.46 66.92,52.69 73.79,52.69C85,52.67 93.18,44.8 93.18,34C93.18,23.2 85,15.23 73.79,15.23ZM37.19,41.37C32.44,41.37 29.61,38.24 29.61,34.1C29.61,29.96 32.44,26.74 37.19,26.74C41.19,26.74 44.46,29.96 48,34.3C44.66,38.34 41.13,41.37 37.19,41.37ZM73.45,41.37C69.51,41.37 66.18,38.24 62.64,34.1C66.28,29.76 69.41,26.74 73.45,26.74C78.2,26.74 81,30 81,34.1C81,38.2 78.2,41.37 73.45,41.37Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M63.74,50L71.28,41C68.28,40.1 65.51,37.4 62.64,34.05L55.3,43C57.703,45.788 60.556,48.154 63.74,50Z" style="fill:rgb(32,167,201);fill-rule:nonzero;"/>
|
||||
<g id="Main">
|
||||
<g id="Superset">
|
||||
<g id="Full-Lockup-With-Text">
|
||||
<g id="Group-7">
|
||||
<g id="Group-17">
|
||||
<g id="Superset-Copy">
|
||||
<g>
|
||||
<path d="M116.72,40.39C116.751,39.474 116.36,38.592 115.66,38C114.539,37.193 113.272,36.609 111.93,36.28C109.421,35.66 107.048,34.582 104.93,33.1C103.37,31.922 102.481,30.053 102.55,28.1C102.528,26.015 103.555,24.052 105.28,22.88C107.327,21.458 109.79,20.754 112.28,20.88C114.812,20.767 117.301,21.577 119.28,23.16C120.994,24.509 121.961,26.601 121.88,28.78L121.88,28.88L116.82,28.88C116.861,27.778 116.419,26.71 115.61,25.96C114.667,25.171 113.457,24.773 112.23,24.85C111.077,24.779 109.934,25.104 108.99,25.77C108.263,26.344 107.842,27.224 107.85,28.15C107.867,28.99 108.298,29.769 109,30.23C110.313,31.008 111.726,31.603 113.2,32C115.582,32.553 117.81,33.633 119.72,35.16C121.197,36.462 122.013,38.362 121.94,40.33C122.008,42.418 121.013,44.404 119.3,45.6C117.238,46.985 114.78,47.662 112.3,47.53C109.663,47.589 107.072,46.823 104.89,45.34C102.838,43.996 101.66,41.648 101.81,39.2L101.81,39.09L107,39.09C106.889,40.389 107.42,41.664 108.42,42.5C109.597,43.291 111.004,43.671 112.42,43.58C113.571,43.658 114.716,43.348 115.67,42.7C116.371,42.144 116.762,41.283 116.72,40.39Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M137,44.4C136.453,45.359 135.672,46.164 134.73,46.74C132.116,48.188 128.835,47.72 126.73,45.6C125.583,44.267 125.01,42.24 125.01,39.52L125.01,27.85L130.21,27.85L130.21,39.58C130.131,40.629 130.379,41.678 130.92,42.58C131.434,43.208 132.22,43.551 133.03,43.5C133.767,43.516 134.498,43.38 135.18,43.1C135.764,42.836 136.268,42.422 136.64,41.9L136.64,27.85L141.86,27.85L141.86,47.18L137.41,47.18L137,44.4Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M162.87,38.05C162.99,40.508 162.286,42.937 160.87,44.95C159.569,46.68 157.492,47.658 155.33,47.56C154.4,47.575 153.478,47.384 152.63,47C151.843,46.61 151.158,46.042 150.63,45.34L150.63,54.62L145.43,54.62L145.43,27.85L150.13,27.85L150.44,30.13C150.968,29.331 151.673,28.664 152.5,28.18C153.363,27.707 154.336,27.469 155.32,27.49C157.535,27.403 159.644,28.467 160.89,30.3C162.313,32.49 163.013,35.072 162.89,37.68L162.87,38.05ZM157.65,37.65C157.71,36.118 157.397,34.595 156.74,33.21C156.228,32.144 155.132,31.476 153.95,31.51C153.253,31.49 152.562,31.656 151.95,31.99C151.393,32.322 150.937,32.799 150.63,33.37L150.63,41.86C150.942,42.394 151.4,42.828 151.95,43.11C152.573,43.411 153.259,43.558 153.95,43.54C155.082,43.61 156.161,43.032 156.73,42.05C157.376,40.819 157.684,39.439 157.62,38.05L157.65,37.65Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M174.21,47.56C171.699,47.674 169.258,46.696 167.52,44.88C165.828,43.026 164.93,40.579 165.02,38.07L165.02,37.36C164.918,34.784 165.761,32.258 167.39,30.26C170.696,26.757 176.29,26.572 179.82,29.85C181.338,31.617 182.119,33.903 182,36.23L182,39.07L170.43,39.07L170.43,39.18C170.48,40.34 170.933,41.447 171.71,42.31C172.51,43.146 173.634,43.595 174.79,43.54C175.762,43.562 176.732,43.444 177.67,43.19C178.539,42.91 179.377,42.542 180.17,42.09L181.58,45.32C180.656,46.037 179.609,46.579 178.49,46.92C177.108,47.366 175.662,47.582 174.21,47.56ZM173.74,31.56C172.841,31.53 171.983,31.946 171.45,32.67C170.859,33.531 170.513,34.537 170.45,35.58L170.5,35.67L176.9,35.67L176.9,35.21C176.949,34.261 176.674,33.322 176.12,32.55C175.546,31.835 174.655,31.446 173.74,31.51L173.74,31.56Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M195.3,32.33L193.38,32.33C192.711,32.303 192.047,32.47 191.47,32.81C190.964,33.141 190.567,33.614 190.33,34.17L190.33,47.18L185.13,47.18L185.13,27.85L190,27.85L190.23,30.71C190.616,29.787 191.224,28.972 192,28.34C192.71,27.776 193.594,27.476 194.5,27.49C194.741,27.488 194.982,27.508 195.22,27.55L195.89,27.7L195.3,32.33Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M208.32,41.86C208.308,41.257 207.996,40.698 207.49,40.37C206.544,39.809 205.498,39.435 204.41,39.27C202.553,38.979 200.785,38.271 199.24,37.2C198.087,36.32 197.433,34.93 197.49,33.48C197.487,31.814 198.265,30.24 199.59,29.23C201.198,28.003 203.19,27.386 205.21,27.49C207.312,27.38 209.391,27.991 211.1,29.22C212.489,30.234 213.279,31.882 213.2,33.6L213.2,33.71L208.2,33.71C208.226,33.002 207.958,32.314 207.46,31.81C206.859,31.287 206.074,31.024 205.28,31.08C204.561,31.04 203.85,31.26 203.28,31.7C202.816,32.075 202.55,32.644 202.56,33.24C202.551,33.826 202.837,34.379 203.32,34.71C204.271,35.243 205.318,35.582 206.4,35.71C208.308,35.991 210.126,36.71 211.71,37.81C212.862,38.729 213.506,40.148 213.44,41.62C213.458,43.325 212.62,44.93 211.21,45.89C209.473,47.062 207.403,47.641 205.31,47.54C203.1,47.652 200.925,46.939 199.21,45.54C197.817,44.508 196.996,42.873 197,41.14L197,41.04L201.77,41.04C201.72,41.907 202.093,42.746 202.77,43.29C203.515,43.784 204.397,44.029 205.29,43.99C206.067,44.039 206.838,43.835 207.49,43.41C208.012,43.069 208.326,42.484 208.32,41.86Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M224.86,47.56C222.352,47.674 219.914,46.696 218.18,44.88C216.488,43.026 215.59,40.579 215.68,38.07L215.68,37.36C215.579,34.786 216.419,32.261 218.04,30.26C221.346,26.757 226.94,26.572 230.47,29.85C231.992,31.615 232.77,33.903 232.64,36.23L232.64,39.07L221.09,39.07L221.09,39.18C221.137,40.339 221.587,41.446 222.36,42.31C223.162,43.149 224.291,43.598 225.45,43.54C226.419,43.562 227.385,43.444 228.32,43.19C229.193,42.912 230.034,42.544 230.83,42.09L232.24,45.32C231.315,46.035 230.268,46.577 229.15,46.92C227.765,47.366 226.315,47.582 224.86,47.56ZM224.4,31.56C223.5,31.526 222.641,31.943 222.11,32.67C221.519,33.532 221.174,34.537 221.11,35.58L221.17,35.67L227.57,35.67L227.57,35.21C227.619,34.261 227.344,33.322 226.79,32.55C226.214,31.832 225.318,31.442 224.4,31.51L224.4,31.56Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
<path d="M242.35,23.11L242.35,27.85L245.61,27.85L245.61,31.51L242.35,31.51L242.35,41.36C242.296,41.937 242.465,42.513 242.82,42.97C243.15,43.299 243.604,43.474 244.07,43.45C244.304,43.451 244.538,43.435 244.77,43.4C245.003,43.363 245.233,43.313 245.46,43.25L245.91,47.02C245.408,47.195 244.893,47.332 244.37,47.43C243.834,47.516 243.293,47.56 242.75,47.56C241.219,47.662 239.712,47.126 238.59,46.08C237.508,44.765 236.984,43.077 237.13,41.38L237.13,31.51L234.31,31.51L234.31,27.85L237.13,27.85L237.13,23.11L242.35,23.11Z" style="fill:rgb(72,72,72);fill-rule:nonzero;"/>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<path d="M55.6,25.22C53.213,22.392 50.378,19.973 47.21,18.06L39.66,27.16C42.53,28.16 45.07,30.74 47.77,34.03L48.07,34.24L55.6,25.22Z" style="fill:rgb(32,167,201);fill-rule:nonzero;"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 8.9 KiB |
BIN
docs/src/images/vertica.png
Normal file
|
After Width: | Height: | Size: 6.6 KiB |
32
docs/src/pages/404.jsx
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
|
||||
import Layout from '../components/layout.tsx';
|
||||
import SEO from '../components/seo';
|
||||
|
||||
const NotFoundPage = () => (
|
||||
<Layout>
|
||||
<SEO title="404: Not found" />
|
||||
<h1>NOT FOUND</h1>
|
||||
<p>You just hit a route that does not exist... the sadness.</p>
|
||||
</Layout>
|
||||
);
|
||||
|
||||
export default NotFoundPage;
|
||||
143
docs/src/pages/community.tsx
Normal file
@@ -0,0 +1,143 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
import React from 'react';
|
||||
import { css } from '@emotion/core';
|
||||
import { Card, List } from 'antd';
|
||||
import { GithubOutlined } from '@ant-design/icons';
|
||||
import SEO from '../components/seo';
|
||||
import Layout from '../components/layout';
|
||||
import { pmc } from '../resources/data';
|
||||
|
||||
const { Meta } = Card;
|
||||
|
||||
const links = [
|
||||
[
|
||||
'https://apache-superset.slack.com/join/shared_invite/zt-g8lpruog-HeqpgYrwdfrD5OYhlU7hPQ#/',
|
||||
'Slack',
|
||||
'interact with other Superset users and community members',
|
||||
],
|
||||
[
|
||||
'https://github.com/apache/incubator-superset',
|
||||
'GitHub',
|
||||
'create tickets to report issues, report bugs, and suggest new features',
|
||||
],
|
||||
[
|
||||
'https://lists.apache.org/list.html?dev@superset.apache.org',
|
||||
'dev@ Mailing List',
|
||||
'participate in conversations with committers and contributors',
|
||||
],
|
||||
[
|
||||
'https://stackoverflow.com/questions/tagged/superset+apache-superset',
|
||||
'Stack Overflow',
|
||||
'our growing knowledge base',
|
||||
],
|
||||
[
|
||||
'https://www.meetup.com/Global-Apache-Superset-Community-Meetup/',
|
||||
'Superset Meetup Group',
|
||||
'join our monthly virtual meetups and register for any upcoming events',
|
||||
],
|
||||
[
|
||||
'https://github.com/apache/incubator-superset/blob/master/INTHEWILD.md',
|
||||
'Organizations',
|
||||
'a list of some of the organizations using Superset in production',
|
||||
],
|
||||
[
|
||||
'https://github.com/apache-superset/awesome-apache-superset',
|
||||
'Contributors Guide',
|
||||
'Interested in contributing? Learn how to contribute and best practices',
|
||||
],
|
||||
];
|
||||
|
||||
const communityContainer = css`
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
justify-content: space-around;
|
||||
margin: 0 auto;
|
||||
overflow: auto;
|
||||
.communityCard {
|
||||
font-size: 12px;
|
||||
overflow: hidden;
|
||||
margin: 10px 10px;
|
||||
.ant-card-meta-title {
|
||||
text-overflow: clip;
|
||||
white-space: normal;
|
||||
}
|
||||
.ant-card-body {
|
||||
padding: 8px;
|
||||
display:inline-block;
|
||||
white-space: nowrap;
|
||||
}
|
||||
}
|
||||
`;
|
||||
|
||||
const getInvolvedContainer = css`
|
||||
margin-bottom: 25px;
|
||||
`;
|
||||
|
||||
const Community = () => {
|
||||
const pmcList = pmc.map((e) => {
|
||||
const name = e.name.indexOf(' ');
|
||||
return (
|
||||
<a href={e.github} target="_blank" rel="noreferrer" key={name}>
|
||||
<Card
|
||||
className="communityCard"
|
||||
hoverable
|
||||
style={{ width: '150px' }}
|
||||
size="small"
|
||||
cover={<img alt="example" src={e.image} />}
|
||||
>
|
||||
<GithubOutlined style={{ paddingRight: 3, paddingTop: 3}} />
|
||||
{e.name}
|
||||
</Card>
|
||||
</a>
|
||||
);
|
||||
});
|
||||
return (
|
||||
<Layout>
|
||||
<div className="contentPage">
|
||||
<SEO title="Community" />
|
||||
<section>
|
||||
<h1 className="title">Community</h1>
|
||||
Get involved in our welcoming, fast growing community!
|
||||
</section>
|
||||
<section className="joinCommunity">
|
||||
<div css={getInvolvedContainer}>
|
||||
<h2>Get involved!</h2>
|
||||
<List
|
||||
size="small"
|
||||
bordered
|
||||
dataSource={links}
|
||||
renderItem={([href, link, post]) => (
|
||||
<List.Item>
|
||||
<a href={href}>{link}</a> - {post}
|
||||
</List.Item>
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
</section>
|
||||
<section className="ppmc">
|
||||
<h2>Apache Committers</h2>
|
||||
<div css={communityContainer}>{pmcList}</div>
|
||||
</section>
|
||||
</div>
|
||||
</Layout>
|
||||
);
|
||||
};
|
||||
|
||||
export default Community;
|
||||
35
docs/src/pages/docs/Connecting to Databases/athena.mdx
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
name: Amazon Athena
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/athena
|
||||
index: 2
|
||||
version: 1
|
||||
---
|
||||
|
||||
## AWS Athena
|
||||
|
||||
### PyAthenaJDBC
|
||||
|
||||
[PyAthenaJDBC](https://pypi.org/project/PyAthenaJDBC/) is a Python DB 2.0 compliant wrapper for the
|
||||
[Amazon Athena JDBC driver](https://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html).
|
||||
|
||||
The connection string for Amazon Athena is as follows:
|
||||
|
||||
```
|
||||
awsathena+jdbc://{aws_access_key_id}:{aws_secret_access_key}@athena.{region_name}.amazonaws.com/{schema_name}?s3_staging_dir={s3_staging_dir}&...
|
||||
```
|
||||
|
||||
Note that you'll need to escape & encode when forming the connection string like so:
|
||||
|
||||
```
|
||||
s3://... -> s3%3A//...
|
||||
```
|
||||
|
||||
### PyAthena
|
||||
|
||||
You can also use [PyAthena library](https://pypi.org/project/PyAthena/) (no Java required) with the
|
||||
following connection string:
|
||||
|
||||
```
|
||||
awsathena+rest://{aws_access_key_id}:{aws_secret_access_key}@athena.{region_name}.amazonaws.com/{schema_name}?s3_staging_dir={s3_staging_dir}&...
|
||||
```
|
||||
18
docs/src/pages/docs/Connecting to Databases/clickhouse.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
name: Clickhouse
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/clickhouse
|
||||
index: 12
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Clickhouse
|
||||
|
||||
The recommended connector library for Clickhouse is
|
||||
[sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
clickhouse://{username}:{password}@{hostname}:{port}/{database}
|
||||
```
|
||||
18
docs/src/pages/docs/Connecting to Databases/cockroachdb.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
name: CockroachDB
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/cockroachdb
|
||||
index: 13
|
||||
version: 1
|
||||
---
|
||||
|
||||
## CockroachDB
|
||||
|
||||
The recommended connector library for CockroachDB is
|
||||
[sqlalchemy-cockroachdb](https://github.com/cockroachdb/sqlalchemy-cockroachdb).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
cockroachdb://root@{hostname}:{port}/{database}?sslmode=disable
|
||||
```
|
||||
@@ -0,0 +1,86 @@
|
||||
---
|
||||
name: New Drivers to Docker Image
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/dockeradddrivers
|
||||
index: 1
|
||||
version: 1
|
||||
---
|
||||
## Install New Database Drivers in Docker Image
|
||||
|
||||
Superset requires a Python database driver to be installed for each additional type of database you
|
||||
want to connect to. When setting up Superset locally via `docker-compose`, the drivers and packages
|
||||
contained in
|
||||
[requirements.txt](https://github.com/apache/incubator-superset/blob/master/requirements.txt) and
|
||||
[requirements-dev.txt](https://github.com/apache/incubator-superset/blob/master/requirements-dev.txt)
|
||||
will be installed automatically.
|
||||
|
||||
In this section, we'll walk through how to install the MySQL connector library. The connector
|
||||
library installation process is the same for all additional libraries and we'll end this section
|
||||
with the recommended connector library for each database.
|
||||
|
||||
### 1. Determine the driver you need
|
||||
|
||||
To figure out how to install the [database driver](/docs/databases/installing-database-drivers) of your choice.
|
||||
|
||||
In the example, we'll walk through the process of installing a MySQL driver in Superset.
|
||||
|
||||
### 2. Install MySQL Driver
|
||||
|
||||
As we are currently running inside of a Docker container via `docker compose`, we cannot simply run
|
||||
`pip install mysqlclient` on our local shell and expect the drivers to be installed within the
|
||||
Docker containers for superset.
|
||||
|
||||
In order to address this, the Superset `docker compose` setup comes with a mechanism for you to
|
||||
install packages locally, which will be ignored by Git for the purposes of local development. Please
|
||||
follow these steps:
|
||||
|
||||
Create `requirements-local.txt`
|
||||
|
||||
```
|
||||
# From the repo root...
|
||||
touch ./docker/requirements-local.txt
|
||||
```
|
||||
|
||||
Add the driver selected in step above:
|
||||
|
||||
```
|
||||
echo "mysqlclient" >> ./docker/requirements-local.txt
|
||||
```
|
||||
|
||||
Rebuild your local image with the new driver baked in:
|
||||
|
||||
```
|
||||
docker-compose build --force-rm
|
||||
```
|
||||
|
||||
After the rebuild is complete, which may take a few minutes, relaunch:
|
||||
|
||||
```
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
### 3. Connect to MySQL
|
||||
|
||||
Now that you've got a MySQL driver installed locally, you should be able to test it out.
|
||||
|
||||
We can now create a Datasource in Superset that can be used to connect to a MySQL instance. Assuming
|
||||
your MySQL instance is running locally and can be accessed via localhost, use the following
|
||||
connection string in “SQL Alchemy URI”, by going to Sources > Databases > + icon (to add a new
|
||||
datasource) in Superset.
|
||||
|
||||
For Docker running in Linux:
|
||||
|
||||
```
|
||||
mysql://mysqluser:mysqluserpassword@localhost/example?charset=utf8
|
||||
```
|
||||
|
||||
For Docker running in OSX:
|
||||
|
||||
```
|
||||
mysql://mysqluser:mysqluserpassword@docker.for.mac.host.internal/example?charset=utf8
|
||||
```
|
||||
|
||||
Then click “Test Connection”, which should give you an “OK” message. If not, please look at your
|
||||
terminal for error messages, and reach out for help.
|
||||
|
||||
You can repeat this process for every database you want superset to be able to connect to.
|
||||
21
docs/src/pages/docs/Connecting to Databases/dremio.mdx
Normal file
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: Dremio
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/dremio
|
||||
index: 14
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Dremio
|
||||
|
||||
The recommended connector library for Dremio is
|
||||
[sqlalchemy_dremio](https://github.com/sqggles/sqlalchemy_dremio).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
dremio://{username}:{password}@{host}:{port}/dremio
|
||||
```
|
||||
|
||||
This [blog post by Dremio](https://www.dremio.com/tutorials/dremio-apache-superset/) has some
|
||||
additional helpful instructions on connecting Superset to Dremio.
|
||||
48
docs/src/pages/docs/Connecting to Databases/drill.mdx
Normal file
@@ -0,0 +1,48 @@
|
||||
---
|
||||
name: Apache Drill
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/drill
|
||||
index: 4
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Drill
|
||||
|
||||
### SQLAlchemy
|
||||
|
||||
The recommended way to connect to Apache Drill is through SQLAlchemy. You can use the
|
||||
[sqlalchemy-drill](https://github.com/JohnOmernik/sqlalchemy-drill) package.
|
||||
|
||||
Once that is done, you can connect to Drill in two ways, either via the REST interface or by JDBC.
|
||||
If you are connecting via JDBC, you must have the Drill JDBC Driver installed.
|
||||
|
||||
The basic connection string for Drill looks like this:
|
||||
|
||||
```
|
||||
drill+sadrill://<username>:<password>@<host>:<port>/<storage_plugin>?use_ssl=True
|
||||
```
|
||||
|
||||
To connect to Drill running on a local machine running in embedded mode you can use the following
|
||||
connection string:
|
||||
|
||||
```
|
||||
drill+sadrill://localhost:8047/dfs?use_ssl=False
|
||||
```
|
||||
|
||||
### JDBC
|
||||
|
||||
Connecting to Drill through JDBC is more complicated and we recommend following
|
||||
[this tutorial](https://drill.apache.org/docs/using-the-jdbc-driver/).
|
||||
|
||||
The connection string looks like:
|
||||
|
||||
```
|
||||
drill+jdbc://<username>:<passsword>@<host>:<port>
|
||||
```
|
||||
|
||||
### ODBC
|
||||
|
||||
We recommend reading the
|
||||
[Apache Drill documentation](https://drill.apache.org/docs/installing-the-driver-on-linux/) and read
|
||||
the [Github README](https://github.com/JohnOmernik/sqlalchemy-drill#usage-with-odbc) to learn how to
|
||||
work with Drill through ODBC.
|
||||
64
docs/src/pages/docs/Connecting to Databases/druid.mdx
Normal file
@@ -0,0 +1,64 @@
|
||||
---
|
||||
name: Apache Druid
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/druid
|
||||
index: 5
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Druid
|
||||
|
||||
A native connector to Druid ships with Superset (behind the `DRUID_IS_ACTIVE` flag) but this is
|
||||
slowly getting deprecated in favor of SQLAlchemy / DBAPI connector made available in the
|
||||
[pydruid library](https://pythonhosted.org/pydruid/).
|
||||
|
||||
The connection string looks like:
|
||||
|
||||
```
|
||||
druid://<User>:<password>@<Host>:<Port-default-9088>/druid/v2/sql
|
||||
```
|
||||
|
||||
### Customizing Druid Connection
|
||||
|
||||
When adding a connection to Druid, you can customize the connection a few different ways in the
|
||||
**Add Database** form.
|
||||
|
||||
**Custom Certificate**
|
||||
|
||||
You can add certificates in the **Root Certificate** field when configuring the new database
|
||||
connection to Druid:
|
||||
|
||||
<img src="/images/root-cert-example.png" />{' '}
|
||||
|
||||
When using a custom certificate, pydruid will automatically use https scheme.
|
||||
|
||||
**Disable SSL Verification**
|
||||
|
||||
To disable SSL verification, add the following to the **Extras** field:
|
||||
|
||||
```
|
||||
engine_params:
|
||||
{"connect_args":
|
||||
{"scheme": "https", "ssl_verify_cert": false}}
|
||||
```
|
||||
|
||||
### Aggregations
|
||||
|
||||
Common aggregations or Druid metrics can be defined and used in Superset. The first and simpler use
|
||||
case is to use the checkbox matrix exposed in your datasource’s edit view (**Sources -> Druid
|
||||
Datasources -> [your datasource] -> Edit -> [tab] List Druid Column**).
|
||||
|
||||
Clicking the GroupBy and Filterable checkboxes will make the column appear in the related dropdowns
|
||||
while in the Explore view. Checking Count Distinct, Min, Max or Sum will result in creating new
|
||||
metrics that will appear in the **List Druid Metric** tab upon saving the datasource.
|
||||
|
||||
By editing these metrics, you’ll notice that their JSON element corresponds to Druid aggregation
|
||||
definition. You can create your own aggregations manually from the **List Druid Metric** tab
|
||||
following Druid documentation.
|
||||
|
||||
### Post-Aggregations
|
||||
|
||||
Druid supports post aggregation and this works in Superset. All you have to do is create a metric,
|
||||
much like you would create an aggregation manually, but specify `postagg` as a `Metric Type`. You
|
||||
then have to provide a valid json post-aggregation definition (as specified in the Druid docs) in
|
||||
the JSON field.
|
||||
@@ -0,0 +1,50 @@
|
||||
---
|
||||
name: Elasticsearch
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/elasticsearch
|
||||
index: 15
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Elasticsearch
|
||||
|
||||
The recommended connector library for Elasticsearch is
|
||||
[elasticsearch-dbapi](https://github.com/preset-io/elasticsearch-dbapi).
|
||||
|
||||
The connection string for Elasticsearch looks like this:
|
||||
|
||||
```
|
||||
elasticsearch+http://{user}:{password}@{host}:9200/
|
||||
```
|
||||
|
||||
**Using HTTPS**
|
||||
|
||||
```
|
||||
elasticsearch+https://{user}:{password}@{host}:9200/
|
||||
```
|
||||
|
||||
Elasticsearch has a default limit of 10000 rows, so you can increase this limit on your cluster or
|
||||
set Superset’s row limit on config
|
||||
|
||||
```
|
||||
ROW_LIMIT = 10000
|
||||
```
|
||||
|
||||
You can query multiple indices on SQL Lab for example
|
||||
|
||||
```
|
||||
SELECT timestamp, agent FROM "logstash"
|
||||
```
|
||||
|
||||
But, to use visualizations for multiple indices you need to create an alias index on your cluster
|
||||
|
||||
```
|
||||
POST /_aliases
|
||||
{
|
||||
"actions" : [
|
||||
{ "add" : { "index" : "logstash-**", "alias" : "logstash_all" } }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Then register your table with the alias name logstash_all
|
||||
18
docs/src/pages/docs/Connecting to Databases/exasol.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
name: Exasol
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/exasol
|
||||
index: 16
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Exasol
|
||||
|
||||
The recommended connector library for Exasol is
|
||||
[sqlalchemy-exasol](https://github.com/exasol/sqlalchemy-exasol).
|
||||
|
||||
The connection string for Exasol looks like this:
|
||||
|
||||
```
|
||||
exa+pyodbc://{username}:{password}@{hostname}:{port}/my_schema?CONNECTIONLCALL=en_US.UTF-8&driver=EXAODBC
|
||||
```
|
||||
@@ -0,0 +1,70 @@
|
||||
---
|
||||
name: Extra Database Settings
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/extra-settings
|
||||
index: 40
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Extra Database Settings
|
||||
|
||||
### Deeper SQLAlchemy Integration
|
||||
|
||||
It is possible to tweak the database connection information using the parameters exposed by
|
||||
SQLAlchemy. In the **Database edit** view, you can edit the **Extra** field as a JSON blob.
|
||||
|
||||
This JSON string contains extra configuration elements. The `engine_params` object gets unpacked
|
||||
into the `sqlalchemy.create_engine` call, while the `metadata_params` get unpacked into the
|
||||
`sqlalchemy.MetaData` call. Refer to the SQLAlchemy docs for more information.
|
||||
|
||||
### Schemas
|
||||
|
||||
Databases like Postgres and Redshift use the **schema** as the logical entity on top of the
|
||||
**database**. For Superset to connect to a specific schema, you can set the **schema** parameter in
|
||||
the **Edit Tables** form (Sources > Tables > Edit record).
|
||||
|
||||
### External Password Store for SQLAlchemy Connections
|
||||
|
||||
Superset can be configured to use an external store for database passwords. This is useful if you are
|
||||
running a custom secret distribution framework and do not wish to store secrets in Superset’s meta
|
||||
database.
|
||||
|
||||
Example: Write a function that takes a single argument of type `sqla.engine.url` and returns the
|
||||
password for the given connection string. Then set `SQLALCHEMY_CUSTOM_PASSWORD_STORE` in your config
|
||||
file to point to that function.
|
||||
|
||||
```python
|
||||
def example_lookup_password(url):
|
||||
secret = <<get password from external framework>>
|
||||
return 'secret'
|
||||
|
||||
SQLALCHEMY_CUSTOM_PASSWORD_STORE = example_lookup_password
|
||||
```
|
||||
|
||||
A common pattern is to use environment variables to make secrets available.
|
||||
`SQLALCHEMY_CUSTOM_PASSWORD_STORE` can also be used for that purpose.
|
||||
|
||||
```python
|
||||
def example_password_as_env_var(url):
|
||||
# assuming the uri looks like
|
||||
# mysql://localhost?superset_user:{SUPERSET_PASSWORD}
|
||||
return url.password.format(os.environ)
|
||||
|
||||
SQLALCHEMY_CUSTOM_PASSWORD_STORE = example_password_as_env_var
|
||||
```
|
||||
|
||||
### SSL Access to Databases
|
||||
|
||||
You can use the `Extra` field in the **Edit Databases** form to configure SSL:
|
||||
|
||||
```JSON
|
||||
{
|
||||
"metadata_params": {},
|
||||
"engine_params": {
|
||||
"connect_args":{
|
||||
"sslmode":"require",
|
||||
"sslrootcert": "/path/to/my/pem"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,57 @@
|
||||
---
|
||||
name: Google BigQuery
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/bigquery
|
||||
index: 16
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Google BigQuery
|
||||
|
||||
The recommended connector library for BigQuery is
|
||||
[pybigquery](https://github.com/mxmzdlv/pybigquery).
|
||||
|
||||
The connection string for BigQuery looks like:
|
||||
|
||||
```
|
||||
bigquery://{project_id}
|
||||
```
|
||||
|
||||
When adding a new BigQuery connection in Superset, you'll also need to add the GCP Service Account
|
||||
credentials file (as a JSON).
|
||||
|
||||
1. Create your Service Account via the Google Cloud Platform control panel, provide it access to the
|
||||
appropriate BigQuery datasets, and download the JSON configuration file for the service account.
|
||||
|
||||
2. In Superset, add a JSON blob to the **Secure Extra** field in the database configuration form with
|
||||
the following format:
|
||||
|
||||
```
|
||||
{
|
||||
"credentials_info": <contents of credentials JSON file>
|
||||
}
|
||||
```
|
||||
|
||||
The resulting file should have this structure:
|
||||
|
||||
```
|
||||
{
|
||||
"credentials_info": {
|
||||
"type": "service_account",
|
||||
"project_id": "...",
|
||||
"private_key_id": "...",
|
||||
"private_key": "...",
|
||||
"client_email": "...",
|
||||
"client_id": "...",
|
||||
"auth_uri": "...",
|
||||
"token_uri": "...",
|
||||
"auth_provider_x509_cert_url": "...",
|
||||
"client_x509_cert_url": "...",
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You should then be able to connect to your BigQuery datasets.
|
||||
|
||||
To be able to upload CSV or Excel files to BigQuery in Superset, you'll need to also add the
|
||||
[pandas_gbq](https://github.com/pydata/pandas-gbq) library.
|
||||
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Google Sheets
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/google-sheets
|
||||
index: 17
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Google Sheets
|
||||
|
||||
Google Sheets has a very limited
|
||||
[SQL API](https://developers.google.com/chart/interactive/docs/querylanguage). The recommended
|
||||
connector library for Google Sheets is [gsheetsdb](https://github.com/betodealmeida/gsheets-db-api).
|
||||
|
||||
There are a few steps involved in connecting Superset to Google Sheets. This
|
||||
[tutorial](https://preset.io/blog/2020-06-01-connect-superset-google-sheets/) has the most up-to-date
|
||||
instructions on setting up this connection.
|
||||
17
docs/src/pages/docs/Connecting to Databases/hana.mdx
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Hana
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/hana
|
||||
index: 18
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Hana
|
||||
|
||||
The recommended connector library is [sqlalchemy-hana](https://github.com/SAP/sqlalchemy-hana).
|
||||
|
||||
The connection string is formatted as follows:
|
||||
|
||||
```
|
||||
hana://{username}:{password}@{host}:{port}
|
||||
```
|
||||
17
docs/src/pages/docs/Connecting to Databases/hive.mdx
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Apache Hive
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/hive
|
||||
index: 6
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Hive
|
||||
|
||||
The [pyhive](https://pypi.org/project/PyHive/) library is the recommended way to connect to Hive through SQLAlchemy.
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
hive://hive@{hostname}:{port}/{database}
|
||||
```
|
||||
18
docs/src/pages/docs/Connecting to Databases/ibm-db2.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
name: IBM DB2
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/ibm-db2
|
||||
index: 18
|
||||
version: 1
|
||||
---
|
||||
|
||||
## IBM DB2
|
||||
|
||||
The [IBM_DB_SA](https://github.com/ibmdb/python-ibmdbsa/tree/master/ibm_db_sa) library provides a
|
||||
Python / SQLAlchemy interface to IBM Data Servers.
|
||||
|
||||
Here's the recommended connection string:
|
||||
|
||||
```
|
||||
db2+ibm_db://{username}:{passport}@{hostname}:{port}/{database}
|
||||
```
|
||||
17
docs/src/pages/docs/Connecting to Databases/impala.mdx
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Apache Impala
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/impala
|
||||
index: 7
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Impala
|
||||
|
||||
The recommended connector library for Apache Impala is [impyla](https://github.com/cloudera/impyla).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
impala://{hostname}:{port}/{database}
|
||||
```
|
||||
70
docs/src/pages/docs/Connecting to Databases/index.mdx
Normal file
@@ -0,0 +1,70 @@
|
||||
---
|
||||
name: Install Database Drivers
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/installing-database-drivers
|
||||
index: 0
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Install Database Drivers
|
||||
|
||||
Superset requires a Python database driver to be installed for each additional type of database you
|
||||
want to connect to.
|
||||
|
||||
Superset interacts with the underlying databases using the provided SQL interface (often times
|
||||
through a SQLAlchemy library).
|
||||
|
||||
### Supported Databases and Dependencies
|
||||
|
||||
Superset does not ship bundled with connectivity to databases, except for Sqlite, which is part of the Python standard library. You’ll need to install the required packages for the database you want to use as your metadata database as well as the packages needed to connect to the databases you want to access through Superset.
|
||||
|
||||
A list of some of the recommended packages.
|
||||
|
||||
| Database | PyPI package | Connection String |
|
||||
| --- | --- | --- |
|
||||
|[Amazon Athena](/docs/databases/athena)|```pip install "PyAthenaJDBC>1.0.9``` , ```pip install "PyAthena>1.2.0``` | ```awsathena+rest://{aws_access_key_id}:{aws_secret_access_key}@athena.{region_name}.amazonaws.com/{ ```|
|
||||
|[Amazon Redshift](/docs/databases/redshift)|```pip install sqlalchemy-redshift```| ``` redshift+psycopg2://<userName>:<DBPassword>@<AWS End Point>:5439/<Database Name>``` |
|
||||
|[Apache Drill](/docs/databases/drill)|```pip install sqlalchemy-drill```| ```drill+sadrill:// For JDBC drill+jdbc://``` |
|
||||
|[Apache Druid](/docs/databases/druid)|```pip install pydruid```| ```druid://<User>:<password>@<Host>:<Port-default-9088>/druid/v2/sql``` |
|
||||
|[Apache Hive](/docs/databases/hive)|```pip install pyhive```|```hive://hive@{hostname}:{port}/{database}```|
|
||||
|[Apache Impala](/docs/databases/impala)|```pip install impyla```|```impala://{hostname}:{port}/{database}```|
|
||||
|[Apache Kylin](/docs/databases/kylin)|```pip install kylinpy```|```kylin://<username>:<password>@<hostname>:<port>/<project>?<param1>=<value1>&<param2>=<value2>```|
|
||||
|[Apache Pinot](/docs/databases/pinot)|```pip install pinotdb```|```pinot+http://CONTROLLER:5436/ query?server=http://CONTROLLER:5983/```|
|
||||
|[Apache Spark SQL](/docs/databases/spark)|```pip install pyhive```|```hive://hive@{hostname}:{port}/{database}```
|
||||
|[Azure MS SQL](/docs/databases/sqlserver)||```mssql+pymssql://UserName@presetSQL:TestPassword@presetSQL.database.windows.net:1433/TestSchema```
|
||||
|[Big Query](/docs/databases/bigquery)|```pip install pybigquery```|```bigquery://{project_id}```|
|
||||
|[ClickHouse](/docs/databases/clickhouse)|```pip install sqlalchemy-clickhouse```|```clickhouse://{username}:{password}@{hostname}:{port}/{database}```|
|
||||
|[CockroachDB](/docs/databases/cockroachdb)|```pip install cockroachdb```|```cockroachdb://root@{hostname}:{port}/{database}?sslmode=disable```|
|
||||
|[Dremio](/docs/databases/dremio)|```pip install sqlalchemy_dremio```|```dremio://user:pwd@host:31010/```|
|
||||
|[Elasticsearch](/docs/databases/elasticsearch)|```pip install elasticsearch-dbapi```|```elasticsearch+http://{user}:{password}@{host}:9200/```|
|
||||
|[Exasol](/docs/databases/exasol)|```pip install sqlalchemy-exasol```|```exa+pyodbc://{username}:{password}@{hostname}:{port}/my_schema?CONNECTIONLCALL=en_US.UTF-8&driver=EXAODBC```|
|
||||
|[Google Sheets](/docs/databases/google-sheets)|```pip install gsheetsdb```|```gsheets://```|
|
||||
|[IBM Db2](/docs/databases/ibm-db2)|```pip install ibm_db_sa```|```db2+ibm_db://```|
|
||||
|[MySQL](/docs/databases/mysql)|```pip install mysqlclient```|```mysql://<UserName>:<DBPassword>@<Database Host>/<Database Name>```|
|
||||
|[Oracle](/docs/databases/oracle)|```pip install cx_Oracle```|```oracle://```|
|
||||
|[PostgreSQL](/docs/databases/postgresql)|```pip install psycopg2```|```postgresql://<UserName>:<DBPassword>@<Database Host>/<Database Name>```|
|
||||
|[Presto](/docs/databases/presto)|```pip install pyhive```|```presto://```|
|
||||
|[SAP Hana](/docs/databases/hana)|```pip install hdbcli sqlalchemy-hana or pip install apache-superset[hana]```|```hana://{username}:{password}@{host}:{port}```|
|
||||
|[Snowflake](/docs/databases/snowflake)|```pip install snowflake-sqlalchemy```|```snowflake://{user}:{password}@{account}.{region}/{database}?role={role}&warehouse={warehouse}```|
|
||||
|SQLite||```sqlite://```|
|
||||
|[SQL Server](/docs/databases/sqlserver)|```pip install pymssql```|```mssql://```|
|
||||
|[Teradata](/docs/databases/teradata)|```pip install sqlalchemy-teradata```|```teradata://{user}:{password}@{host}```|
|
||||
|[Vertica](/docs/databases/vertica)|```pip install sqlalchemy-vertica-python```|```vertica+vertica_python://<UserName>:<DBPassword>@<Database Host>/<Database Name>```|
|
||||
|
||||
***
|
||||
|
||||
Note that many other databases are supported, the main criteria being the existence of a functional SqlAlchemy dialect and Python driver. Googling the keyword sqlalchemy in addition of a keyword that describes the database you want to connect to should get you to the right place.
|
||||
|
||||
If your database or data engine isn't on the list but a SQL interface
|
||||
exists, please file an issue on the
|
||||
[Superset GitHub repo](https://github.com/apache/incubator-superset/issues), so we can work on
|
||||
supporting it.
|
||||
|
||||
[StackOverflow](https://stackoverflow.com/questions/tagged/apache-superset+superset) and the
|
||||
[Superset community Slack](https://join.slack.com/t/apache-superset/shared_invite/enQtNDMxMDY5NjM4MDU0LWJmOTcxYjlhZTRhYmEyYTMzOWYxOWEwMjcwZDZiNWRiNDY2NDUwNzcwMDFhNzE1ZmMxZTZlZWY0ZTQ2MzMyNTU)
|
||||
are great places to get help with connecting to databases in Superset.
|
||||
|
||||
In the end, you should be looking for a Python package compatible with your database. One part that
|
||||
makes database driver installation tricky is the fact that local binaries are sometimes required in
|
||||
order for them to bind properly, which means that various apt packages might need to be installed
|
||||
before pip can get things set up.
|
||||
18
docs/src/pages/docs/Connecting to Databases/kylin.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
name: Apache Kylin
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/kylin
|
||||
index: 8
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Kylin
|
||||
|
||||
The recommended connector library for Apache Kylin is
|
||||
[kylinpy](https://github.com/Kyligence/kylinpy).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
kylin://<username>:<password>@<hostname>:<port>/<project>?<param1>=<value1>&<param2>=<value2>
|
||||
```
|
||||
28
docs/src/pages/docs/Connecting to Databases/mysql.mdx
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: MySQL
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/mysql
|
||||
index: 19
|
||||
version: 1
|
||||
---
|
||||
|
||||
## MySQL
|
||||
|
||||
The recommended connector library for MySQL is [mysqlclient](https://pypi.org/project/mysqlclient/).
|
||||
|
||||
Set up the Connection String using the following settings:
|
||||
|
||||
User Name: UserName
|
||||
Password: DBPassword
|
||||
Database Host:
|
||||
- For Localhost or docker running Linux: localhost or 127.0.0.1
|
||||
- For On Prem: IP address or Host name
|
||||
- For AWS Endpoint
|
||||
- For Docker running in OSX: docker.for.mac.host.internal
|
||||
Database Name: Database Name
|
||||
Port: default 3306
|
||||
|
||||
Connection String
|
||||
```
|
||||
mysql://<UserName>:<DBPassword>@<Database Host>/<Database Name>
|
||||
```
|
||||
18
docs/src/pages/docs/Connecting to Databases/oracle.mdx
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
name: Oracle
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/oracle
|
||||
index: 20
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Oracle
|
||||
|
||||
The recommended connector library is
|
||||
[cx_Oracle](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html).
|
||||
|
||||
The connection string is formatted as follows:
|
||||
|
||||
```
|
||||
oracle://<username>:<password>@<hostname>:<port>
|
||||
```
|
||||
17
docs/src/pages/docs/Connecting to Databases/pinot.mdx
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Apache Pinot
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/pinot
|
||||
index: 9
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Pinot
|
||||
|
||||
The recommended connector library for Apache Pinot is [pinotdb](https://pypi.org/project/pinotdb/).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
pinot+http://controller:5436/query?server=http://controller:5983/
|
||||
```
|
||||
40
docs/src/pages/docs/Connecting to Databases/postgres.mdx
Normal file
@@ -0,0 +1,40 @@
|
||||
---
|
||||
name: Postgres
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/postgres
|
||||
index: 21
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Postgres
|
||||
|
||||
Note that the Postgres connector library [psycopg2](https://www.psycopg.org/docs/) comes out of the
|
||||
box with Superset.
|
||||
|
||||
Postgres sample connection parameters:
|
||||
|
||||
- **User Name**: UserName
|
||||
- **Password**: DBPassword
|
||||
- **Database Host**:
|
||||
- For Localhost: localhost or 127.0.0.1
|
||||
- For On Prem: IP address or Host name
|
||||
- For AWS Endpoint
|
||||
- **Database Name**: Database Name
|
||||
- **Port**: default 5432
|
||||
|
||||
The connection string looks like:
|
||||
|
||||
```
|
||||
postgresql+psycopg2://{username}:{password}@{host}:{port}/{database}
|
||||
```
|
||||
|
||||
Extra parameters:
|
||||
|
||||
```
|
||||
?sslmode=require
|
||||
```
|
||||
|
||||
More information about PostgreSQL connection options can be found in the
|
||||
[SQLAlchemy docs](https://docs.sqlalchemy.org/en/13/dialects/postgresql.html#module-sqlalchemy.dialects.postgresql.psycopg2)
|
||||
and the
|
||||
[PostgreSQL docs](https://www.postgresql.org/docs/9.1/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS).
|
||||
26
docs/src/pages/docs/Connecting to Databases/presto.mdx
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: Presto
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/presto
|
||||
index: 22
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Presto
|
||||
|
||||
The [pyhive](https://pypi.org/project/PyHive/) library is the recommended way to connect to Presto through SQLAlchemy.
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
hive://hive@{hostname}:{port}/{database}
|
||||
```
|
||||
|
||||
By default Superset assumes the most recent version of Presto is being used when querying the
|
||||
datasource. If you’re using an older version of Presto, you can configure it in the extra parameter:
|
||||
|
||||
```
|
||||
{
|
||||
"version": "0.123"
|
||||
}
|
||||
```
|
||||
26
docs/src/pages/docs/Connecting to Databases/redshift.mdx
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: Amazon Redshift
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/redshift
|
||||
index: 3
|
||||
version: 1
|
||||
---
|
||||
|
||||
## AWS Redshift
|
||||
|
||||
The [sqlalchemy-redshift](https://pypi.org/project/sqlalchemy-redshift/) library is the recommended
|
||||
way to connect to Redshift through SQLAlchemy.
|
||||
|
||||
You'll need the following setting values to form the connection string:
|
||||
|
||||
- **User Name**: userName
|
||||
- **Password**: DBPassword
|
||||
- **Database Host**: AWS Endpoint
|
||||
- **Database Name**: Database Name
|
||||
- **Port**: default 5439
|
||||
|
||||
Here's what the connection string looks like:
|
||||
|
||||
```
|
||||
redshift+psycopg2://<userName>:<DBPassword>@<AWS End Point>:5439/<Database Name>
|
||||
```
|
||||
32
docs/src/pages/docs/Connecting to Databases/snowflake.mdx
Normal file
@@ -0,0 +1,32 @@
|
||||
---
|
||||
name: Snowflake
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/snowflake
|
||||
index: 23
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Snowflake
|
||||
|
||||
The recommended connector library for Snowflake is
|
||||
[snowflake-sqlalchemy](https://pypi.org/project/snowflake-sqlalchemy/).
|
||||
|
||||
The connection string for Snowflake looks like this:
|
||||
|
||||
```
|
||||
snowflake://{user}:{password}@{account}.{region}/{database}?role={role}&warehouse={warehouse}
|
||||
```
|
||||
|
||||
The schema is not necessary in the connection string, as it is defined per table/query. The role and
|
||||
warehouse can be omitted if defaults are defined for the user, i.e.
|
||||
|
||||
```
|
||||
snowflake://{user}:{password}@{account}.{region}/{database}
|
||||
```
|
||||
|
||||
Make sure the user has privileges to access and use all required
|
||||
databases/schemas/tables/views/warehouses, as the Snowflake SQLAlchemy engine does not test for
|
||||
user/role rights during engine creation by default. However, when pressing the “Test Connection”
|
||||
button in the Create or Edit Database dialog, user/role credentials are validated by passing
|
||||
“validate_default_parameters”: True to the connect() method during engine creation. If the user/role
|
||||
is not authorized to access the database, an error is recorded in the Superset logs.
|
||||
17
docs/src/pages/docs/Connecting to Databases/spark-sql.mdx
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Apache Spark SQL
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/spark-sql
|
||||
index: 10
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Apache Spark SQL
|
||||
|
||||
The recommended connector library for Apache Spark SQL is [pyhive](https://pypi.org/project/PyHive/).
|
||||
|
||||
The expected connection string is formatted as follows:
|
||||
|
||||
```
|
||||
hive://hive@{hostname}:{port}/{database}
|
||||
```
|
||||
17
docs/src/pages/docs/Connecting to Databases/sql-server.mdx
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: SQL Server
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/sql-server
|
||||
index: 24
|
||||
version: 1
|
||||
---
|
||||
|
||||
## SQL Server
|
||||
|
||||
The recommended connector library for SQL Server is [pymssql](https://github.com/pymssql/pymssql).
|
||||
|
||||
The connection string for SQL Server looks like this:
|
||||
|
||||
```
|
||||
mssql+pymssql://UserName@DB:Password@DB_Host:1433/TestSchema
|
||||
```
|
||||
29
docs/src/pages/docs/Connecting to Databases/teradata.mdx
Normal file
@@ -0,0 +1,29 @@
|
||||
---
|
||||
name: Teradata
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/teradata
|
||||
index: 25
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Teradata
|
||||
|
||||
The recommended connector library is
|
||||
[sqlalchemy-teradata](https://github.com/Teradata/sqlalchemy-teradata).
|
||||
|
||||
The connection string for Teradata looks like this:
|
||||
|
||||
```
|
||||
teradata://{user}:{password}@{host}
|
||||
```
|
||||
|
||||
Note: It's required to have Teradata ODBC drivers installed and environment variables configured for
|
||||
proper work of sqlalchemy dialect. Teradata ODBC Drivers available here:
|
||||
https://downloads.teradata.com/download/connectivity/odbc-driver/linux
|
||||
|
||||
Required environment variables:
|
||||
|
||||
```
|
||||
export ODBCINI=/.../teradata/client/ODBC_64/odbc.ini
|
||||
export ODBCINST=/.../teradata/client/ODBC_64/odbcinst.ini
|
||||
```
|
||||
32
docs/src/pages/docs/Connecting to Databases/vertica.mdx
Normal file
@@ -0,0 +1,32 @@
|
||||
---
|
||||
name: Vertica
|
||||
menu: Connecting to Databases
|
||||
route: /docs/databases/vertica
|
||||
index: 26
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Vertica
|
||||
|
||||
The recommended connector library is
|
||||
[sqlalchemy-vertica-python](https://pypi.org/project/sqlalchemy-vertica-python/). The
|
||||
[Vertica](http://www.vertica.com/) connection parameters are:
|
||||
|
||||
- **User Name:** UserName
|
||||
- **Password:** DBPassword
|
||||
- **Database Host:**
|
||||
- For Localhost : localhost or 127.0.0.1
|
||||
- For On Prem : IP address or Host name
|
||||
- For Cloud: IP Address or Host Name
|
||||
- **Database Name:** Database Name
|
||||
- **Port:** default 5433
|
||||
|
||||
The connection string is formatted as follows:
|
||||
|
||||
```
|
||||
vertica+vertica_python://{username}:{password}@{host}/{database}
|
||||
```
|
||||
|
||||
Other parameters:
|
||||
|
||||
- Load Balancer - Backup Host
|
||||
@@ -0,0 +1,365 @@
|
||||
---
|
||||
name: Exploring Data in Superset
|
||||
menu: Creating Charts and Dashboards
|
||||
route: /docs/creating-charts-dashboards/exploring-data
|
||||
index: 2
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Exploring Data in Superset
|
||||
|
||||
In this tutorial, we will introduce key concepts in Apache Superset through the exploration of a
|
||||
real dataset which contains the flights made by employees of a UK-based organization in 2011. The
|
||||
following information about each flight is given:
|
||||
|
||||
- The traveller’s department. For the purposes of this tutorial the departments have been renamed
|
||||
Orange, Yellow and Purple.
|
||||
- The cost of the ticket.
|
||||
- The travel class (Economy, Premium Economy, Business and First Class).
|
||||
- Whether the ticket was a single or return.
|
||||
- The date of travel.
|
||||
- Information about the origin and destination.
|
||||
- The distance between the origin and destination, in kilometers (km).
|
||||
|
||||
### Enabling Upload a CSV Functionality
|
||||
|
||||
You may need to enable the functionality to upload a CSV to your database. The following section
|
||||
explains how to enable this functionality for the examples database.
|
||||
|
||||
In the top menu, select **Sources ‣ Databases**. Find the **examples** database in the list and
|
||||
select the edit record button.
|
||||
|
||||
<img src="/images/edit-record.png" />
|
||||
|
||||
Within the **Edit Database** page, check the **Allow Csv Upload** checkbox. Save by selecting
|
||||
**Save** at the bottom of the page.
|
||||
|
||||
### Loading CSV Data
|
||||
|
||||
Download the CSV dataset to your computer from
|
||||
[Github](https://raw.githubusercontent.com/apache-superset/examples-data/master/tutorial_flights.csv).
|
||||
In the Superset menu, select **Sources > Upload a CSV**.
|
||||
|
||||
<img src="/images/upload_a_csv.png" />
|
||||
|
||||
Then, enter the **Table Name** as _tutorial_flights_ and select the CSV file from your computer.
|
||||
|
||||
<img src="/images/csv_to_database_configuration.png" />
|
||||
|
||||
Next enter the text _Travel Date_ into the **Parse Dates** field.
|
||||
|
||||
<img src="/images/parse_dates_column.png" />
|
||||
|
||||
Leaving all the other options in their default settings, select **Save** at the bottom of the page.
|
||||
|
||||
### Table Visualization
|
||||
|
||||
In this section, we’ll create our first visualization: a table to show the number of flights and
|
||||
cost per travel class.
|
||||
|
||||
To create a new chart, select **New > Chart**.
|
||||
|
||||
<img src="/images/add_new_chart.png" />
|
||||
|
||||
Once in the **Create a new chart** form, select _tutorial_flights_ from the **Chose a datasource**
|
||||
dropdown.
|
||||
|
||||
<img src="/images/chose_a_datasource.png" />
|
||||
|
||||
Next, select the visualization type as **Table**.
|
||||
|
||||
<img src="/images/select_table_visualization_type.png" />
|
||||
|
||||
Then, select **Create new chart** to go into the chart view.
|
||||
|
||||
By default, Apache Superset only shows the last week of data: in our example, we want to look at all
|
||||
the data in the dataset. No problem - within the **Time** section, remove the filter on **Time
|
||||
range** by selecting **Last week** then changing the selection to **No filter**, with a final **OK**
|
||||
to confirm your selection.
|
||||
|
||||
<img src="/images/no_filter_on_time_filter.png" />
|
||||
|
||||
Now, we want to specify the rows in our table by using the **Group by** option. Since in this
|
||||
example, we want to understand different Travel Classes, we select **Travel Class** in this menu.
|
||||
|
||||
Next, we can specify the metrics we would like to see in our table with the **Metrics** option.
|
||||
Count(\*), which represents the number of rows in the table (in this case corresponding to the
|
||||
number of flights since we have a row per flight), is already there. To add cost, within
|
||||
**Metrics**, select **Cost**.
|
||||
|
||||
**Save** the default aggregation option, which is to sum the column.
|
||||
|
||||
<img src="/images/sum_cost_column.png" />
|
||||
|
||||
Finally, select **Run Query** to see the results of the table.
|
||||
|
||||
<img src="/images/tutorial_table.png" />
|
||||
|
||||
Congratulations, you have created your first visualization in Apache Superset!
|
||||
|
||||
To save the visualization, click on **Save** in the top left of the screen. Select the **Save as**
|
||||
option, and enter the chart name as Tutorial Table (you will be able to find it again through the
|
||||
**Charts** screen, accessible in the top menu). Similarly, select **Add to new dashboard** and enter
|
||||
Tutorial Dashboard. Finally, select **Save & go to dashboard**.
|
||||
|
||||
<img src="/images/save_tutorial_table.png" />
|
||||
|
||||
### Dashboard Basics
|
||||
|
||||
Next, we are going to explore the dashboard interface. If you’ve followed the previous section, you
|
||||
should already have the dashboard open. Otherwise, you can navigate to the dashboard by selecting
|
||||
Dashboards on the top menu, then Tutorial dashboard from the list of dashboards.
|
||||
|
||||
On this dashboard you should see the table you created in the previous section. Select **Edit
|
||||
dashboard** and then hover over the table. By selecting the bottom right hand corner of the table
|
||||
(the cursor will change too), you can resize it by dragging and dropping.
|
||||
|
||||
<img src="/images/resize_tutorial_table_on_dashboard.png" />
|
||||
|
||||
Finally, save your changes by selecting Save changes in the top right.
|
||||
|
||||
### Pivot Table
|
||||
|
||||
In this section, we will extend our analysis using a more complex visualization, Pivot Table. By the
|
||||
end of this section, you will have created a table that shows the monthly spend on flights for the
|
||||
first six months, by department, by travel class.
|
||||
|
||||
As before, create a new visualization by selecting **New > Chart** on the top menu. Choose
|
||||
tutorial_flights again as a datasource, then click on the visualization type to get to the
|
||||
visualization menu. Select the **Pivot Table** visualization (you can filter by entering text in the
|
||||
search box) and then **Create a new chart**.
|
||||
|
||||
In the **Time** section, keep the Time Column as Travel Date (this is selected automatically as we
|
||||
only have one time column in our dataset). Then select Time Grain to be month as having daily data
|
||||
would be too granular to see patterns from. Then select the time range to be the first six months of
|
||||
2011 by clicking on Last week in the Time Range section, then in Custom selecting a Start / end of 1st
|
||||
January 2011 and 30th June 2011 respectively by either entering directly the dates or using the
|
||||
calendar widget (by selecting the month name and then the year, you can move more quickly to far
|
||||
away dates).
|
||||
|
||||
<img src="/images/select_dates_pivot_table.png" />
|
||||
|
||||
Next, within the **Query** section, remove the default COUNT(\*) and add Cost, keeping the default
|
||||
SUM aggregate. Note that Apache Superset will indicate the type of the metric by the symbol on the
|
||||
left hand column of the list (ABC for string, # for number, a clock face for time, etc.).
|
||||
|
||||
In **Group by** select **Time**: this will automatically use the Time Column and Time Grain
|
||||
selections we defined in the Time section.
|
||||
|
||||
Within **Columns**, select first Department and then Travel Class. All set – let’s **Run Query** to
|
||||
see some data!
|
||||
|
||||
<img src="/images/tutorial_pivot_table.png" />
|
||||
|
||||
You should see months in the rows and Department and Travel Class in the columns. To get this in our
|
||||
dashboard, select Save, name the chart Tutorial Pivot and using **Add chart to existing dashboard**
|
||||
select **Tutorial Dashboard**, and then finally **Save & go to dashboard**.
|
||||
|
||||
### Line Chart
|
||||
|
||||
In this section, we are going to create a line chart to understand the average price of a ticket by
|
||||
month across the entire dataset. As before, select **New > Chart**, and then tutorial_flights as the
|
||||
datasource and Line Chart as the visualization type.
|
||||
|
||||
In the Time section, as before, keep the Time Column as Travel Date and Time Grain as month but this
|
||||
time for the Time range select No filter as we want to look at the entire dataset.
|
||||
|
||||
Within Metrics, remove the default COUNT(\*) and add Cost. This time, we want to change how this
|
||||
column is aggregated to show the mean value: we can do this by selecting AVG in the aggregate
|
||||
dropdown.
|
||||
|
||||
<img src="/images/average_aggregate_for_cost.png" />
|
||||
|
||||
Next, select **Run Query** to show the data on the chart.
|
||||
|
||||
How does this look? Well, we can see that the average cost goes up in December. However, perhaps it
|
||||
doesn’t make sense to combine both single and return tickets, but rather show two separate lines for
|
||||
each ticket type.
|
||||
|
||||
Let’s do this by selecting Ticket Single or Return in the Group by box, and then selecting **Run
|
||||
Query** again. Nice! We can see that on average single tickets are cheaper than returns and that the
|
||||
big spike in December is caused by return tickets.
|
||||
|
||||
Our chart is looking pretty good already, but let’s customize some more by going to the Customize
|
||||
tab on the left hand pane. Within this pane, try changing the Color Scheme, removing the range
|
||||
filter by selecting No in the Show Range Filter drop down and adding some labels using X Axis Label
|
||||
and Y Axis Label.
|
||||
|
||||
<img src="/images/tutorial_line_chart.png" />
|
||||
|
||||
Once you’re done, Save as Tutorial Line Chart, use **Add chart to existing dashboard** to add this
|
||||
chart to the previous ones on the Tutorial Dashboard and then **Save & go to dashboard**.
|
||||
|
||||
### Markup
|
||||
|
||||
In this section, we will add some text to our dashboard. If you’re there already, you can navigate
|
||||
to the dashboard by selecting Dashboards on the top menu, then Tutorial dashboard from the list of
|
||||
dashboards. Get into edit mode by selecting **Edit dashboard**.
|
||||
|
||||
Within the Insert components pane, drag and drop a Markdown box on the dashboard. Look for the blue
|
||||
lines which indicate the anchor where the box will go.
|
||||
|
||||
<img src="/images/blue_bar_insert_component.png" />
|
||||
|
||||
Now, to edit the text, select the box. You can enter text, in markdown format (see
|
||||
[this Markdown Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) for
|
||||
more information about this format). You can toggle between Edit and Preview using the menu on the
|
||||
top of the box.
|
||||
|
||||
<img src="/images/markdown.png" />
|
||||
|
||||
To exit, select any other part of the dashboard. Finally, don’t forget to keep your changes using
|
||||
**Save changes**.
|
||||
|
||||
### Filter Box
|
||||
|
||||
In this section, you will learn how to add a filter to your dashboard. Specifically, we will create
|
||||
a filter that allows us to look at those flights that depart from a particular country.
|
||||
|
||||
A filter box visualization can be created as any other visualization by selecting **New > Chart**,
|
||||
and then tutorial_flights as the datasource and Filter Box as the visualization type.
|
||||
|
||||
First of all, in the **Time** section, remove the filter from the Time range selection by selecting
|
||||
No filter.
|
||||
|
||||
Next, in **Filters Configurations** first add a new filter by selecting the plus sign and then edit
|
||||
the newly created filter by selecting the pencil icon.
|
||||
|
||||
For our use case, it makes most sense to present a list of countries in alphabetical order. First,
|
||||
enter the column as Origin Country and keep all other options the same and then select **Run
|
||||
Query**. This gives us a preview of our filter.
|
||||
|
||||
Next, remove the date filter by unchecking the Date Filter checkbox.
|
||||
|
||||
<img src="/images/filter_on_origin_country.png" />
|
||||
|
||||
Finally, select **Save**, name the chart as Tutorial Filter, add the chart to our existing Tutorial
|
||||
Dashboard and then Save & go to dashboard. Once on the Dashboard, try using the filter to show only
|
||||
those flights that departed from the United Kingdom – you will see the filter is applied to all of
|
||||
the other visualizations on the dashboard.
|
||||
|
||||
### Publishing Your Dashboard
|
||||
|
||||
If you have followed all of the steps outlined in the previous section, you should have a dashboard
|
||||
that looks like the below. If you would like, you can rearrange the elements of the dashboard by
|
||||
selecting **Edit dashboard** and dragging and dropping.
|
||||
|
||||
If you would like to make your dashboard available to other users, simply select Draft next to the
|
||||
title of your dashboard on the top left to change your dashboard to be in Published state. You can
|
||||
also favorite this dashboard by selecting the star.
|
||||
|
||||
<img src="/images/publish_dashboard.png" />
|
||||
|
||||
### Annotations
|
||||
|
||||
Annotations allow you to add additional context to your chart. In this section, we will add an
|
||||
annotation to the Tutorial Line Chart we made in a previous section. Specifically, we will add the
|
||||
dates when some flights were cancelled by the UK’s Civil Aviation Authority in response to the
|
||||
eruption of the Grímsvötn volcano in Iceland (23-25 May 2011).
|
||||
|
||||
First, add an annotation layer by navigating to Manage ‣ Annotation Layers. Add a new annotation
|
||||
layer by selecting the green plus sign to add a new record. Enter the name Volcanic Eruptions and
|
||||
save. We can use this layer to refer to a number of different annotations.
|
||||
|
||||
Next, add an annotation by navigating to Manage ‣ Annotations and then create a new annotation by
|
||||
selecting the green plus sign. Then, select the Volcanic Eruptions layer, add a short description
|
||||
Grímsvötn and the eruption dates (23-25 May 2011) before finally saving.
|
||||
|
||||
<img src="/images/edit_annotation.png" />
|
||||
|
||||
Then, navigate to the line chart by going to Charts then selecting Tutorial Line Chart from the
|
||||
list. Next, go to the Annotations and Layers section and select Add Annotation Layer. Within this
|
||||
dialogue:
|
||||
|
||||
- Name the layer as Volcanic Eruptions
|
||||
- Change the Annotation Layer Type to Event
|
||||
- Set the Annotation Source as Superset annotation
|
||||
- Specify the Annotation Layer as Volcanic Eruptions
|
||||
|
||||
<img src="/images/annotation_settings.png" />
|
||||
|
||||
Select **Apply** to see your annotation shown on the chart.
|
||||
|
||||
<img src="/images/annotation.png" />
|
||||
|
||||
If you wish, you can change how your annotation looks by changing the settings in the Display
|
||||
configuration section. Otherwise, select **OK** and finally **Save** to save your chart. If you keep
|
||||
the default selection to overwrite the chart, your annotation will be saved to the chart and also
|
||||
appear automatically in the Tutorial Dashboard.
|
||||
|
||||
### Advanced Analytics
|
||||
|
||||
In this section, we are going to explore the Advanced Analytics feature of Apache Superset that
|
||||
allows you to apply additional transformations to your data. The three types of transformation are rolling window functions (such as a rolling mean), time comparison, and resampling.
|
||||
|
||||
**Setting up the base chart**
|
||||
|
||||
In this section, we’re going to set up a base chart which we can then apply the different **Advanced
|
||||
Analytics** features to. Start off by creating a new chart using the same _tutorial_flights_
|
||||
datasource and the **Line Chart** visualization type. Within the Time section, set the Time Range as
|
||||
1st October 2011 and 31st October 2011.
|
||||
|
||||
Next, in the query section, change the Metrics to the sum of Cost. Select **Run Query** to show the
|
||||
chart. You should see the total cost per day for each month in October 2011.
|
||||
|
||||
<img src="/images/advanced_analytics_base.png" />
|
||||
|
||||
Finally, save the visualization as Tutorial Advanced Analytics Base, adding it to the Tutorial
|
||||
Dashboard.
|
||||
|
||||
### Rolling Mean
|
||||
|
||||
There is quite a lot of variation in the data, which makes it difficult to identify any trend. One
|
||||
approach we can take is to show instead a rolling average of the time series. To do this, in the
|
||||
**Moving Average** subsection of **Advanced Analytics**, select mean in the **Rolling** box and
|
||||
enter 7 into both Periods and Min Periods. The period is the length of the rolling period expressed
|
||||
as a multiple of the Time Grain. In our example, the Time Grain is day, so the rolling period is 7
|
||||
days, such that on the 7th October 2011 the value shown would correspond to the first seven days of
|
||||
October 2011. Lastly, by specifying Min Periods as 7, we ensure that our mean is always calculated
|
||||
on 7 days and we avoid any ramp up period.
|
||||
|
||||
After displaying the chart by selecting **Run Query** you will see that the data is less variable
|
||||
and that the series starts later as the ramp up period is excluded.
|
||||
|
||||
<img src="/images/rolling_mean.png" />
|
||||
|
||||
Save the chart as Tutorial Rolling Mean and add it to the Tutorial Dashboard.
|
||||
|
||||
### Time Comparison
|
||||
|
||||
In this section, we will compare values in our time series to the value a week before. Start off by
|
||||
opening the Tutorial Advanced Analytics Base chart, by going to **Charts** in the top menu and then
|
||||
selecting the visualization name in the list (alternatively, find the chart in the Tutorial
|
||||
Dashboard and select Explore chart from the menu for that visualization).
|
||||
|
||||
Next, in the Time Comparison subsection of **Advanced Analytics**, enter the Time Shift by typing in
|
||||
“minus 1 week” (note this box accepts input in natural language). Run Query to see the new chart,
|
||||
which has an additional series with the same values, shifted a week back in time.
|
||||
|
||||
<img src="/images/time_comparison_two_series.png" />
|
||||
|
||||
Then, change the **Calculation type** to Absolute difference and select **Run Query**. We can now
|
||||
see only one series again, this time showing the difference between the two series we saw
|
||||
previously.
|
||||
|
||||
<img src="/images/time_comparison_absolute_difference.png" />
|
||||
|
||||
Save the chart as Tutorial Time Comparison and add it to the Tutorial Dashboard.
|
||||
|
||||
### Resampling the data
|
||||
|
||||
In this section, we’ll resample the data so that rather than having daily data we have weekly data.
|
||||
As in the previous section, reopen the Tutorial Advanced Analytics Base chart.
|
||||
|
||||
Next, in the Python Functions subsection of **Advanced Analytics**, enter 7D, corresponding to seven
|
||||
days, in the Rule and median as the Method and show the chart by selecting **Run Query**.
|
||||
|
||||
<img src="/images/resample.png" />
|
||||
|
||||
Note that now we have a single data point every 7 days. In our case, the value showed corresponds to
|
||||
the median value within the seven daily data points. For more information on the meaning of the
|
||||
various options in this section, refer to the
|
||||
[Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html).
|
||||
|
||||
Lastly, save your chart as Tutorial Resample and add it to the Tutorial Dashboard. Go to the
|
||||
tutorial dashboard to see the four charts side by side and compare the different outputs.
|
||||
277
docs/src/pages/docs/Creating Charts and Dashboards/index.mdx
Normal file
@@ -0,0 +1,277 @@
|
||||
---
|
||||
name: Creating Your First Dashboard
|
||||
menu: Creating Charts and Dashboards
|
||||
route: /docs/creating-charts-dashboards/first-dashboard
|
||||
index: 1
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Creating Your First Dashboard
|
||||
|
||||
This section is focused on documentation for end-users (data analysts, business analysts, data
|
||||
scientists, etc.). In addition to this site, Preset.io maintains an up-to-date set of end-user
|
||||
documentation at [docs.preset.io](https://docs.preset.io/).
|
||||
|
||||
This tutorial targets someone who wants to create charts and dashboards in Superset. We’ll show you
|
||||
how to connect Superset to a new database and configure a table in that database for analysis.
|
||||
You’ll also explore the data you’ve exposed and add a visualization to a dashboard so that you get a
|
||||
feel for the end-to-end user experience.
|
||||
|
||||
### Connecting to a new database
|
||||
|
||||
We assume you already have a database configured and can connect to it from the instance on which
|
||||
you’re running Superset. If you’re just testing Superset and want to explore sample data, you can
|
||||
load some sample PostgreSQL datasets into a fresh DB, or configure the
|
||||
[example weather data](https://github.com/dylburger/noaa-ghcn-weather-data) we use here.
|
||||
|
||||
Under the **Sources** menu, select the _Databases_ option:
|
||||
|
||||
<img src="/images/tutorial_01_sources_database.png" />{' '}
|
||||
|
||||
On the resulting page, click on the green plus sign, near the top right:
|
||||
|
||||
<img src="/images/tutorial_02_add_database.png" />{' '}
|
||||
|
||||
You can configure a number of advanced options on this page, but for this walkthrough, you’ll only
|
||||
need to do **two things**:
|
||||
|
||||
1. Name your database connection:
|
||||
|
||||
<img src="/images/tutorial_03_database_name.png" />
|
||||
|
||||
Provide the SQLAlchemy Connection URI and test the connection:
|
||||
|
||||
<img src="/images/tutorial_04_sqlalchemy_connection_string.png" />
|
||||
|
||||
This example shows the connection string for our test weather database. As noted in the text below
|
||||
the URI, you should refer to the SQLAlchemy documentation on
|
||||
[creating new connection URIs](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls)
|
||||
for your target database.
|
||||
|
||||
Click the **Test Connection** button to confirm things work end to end. Once Superset can
|
||||
successfully connect and authenticate, you should see a popup like this:
|
||||
|
||||
<img src="/images/tutorial_05_connection_popup.png" />
|
||||
|
||||
Moreover, you should also see the list of tables Superset can read from the schema you’re connected
|
||||
to, at the bottom of the page:
|
||||
|
||||
<img src="/images/tutorial_06_list_of_tables.png" />
|
||||
|
||||
If the connection looks good, save the configuration by clicking the **Save** button at the bottom
|
||||
of the page:
|
||||
|
||||
<img src="/images/tutorial_07_save_button.png" />
|
||||
|
||||
### Adding a new table
|
||||
|
||||
Now that you’ve configured a database, you’ll need to add specific tables to Superset that you’d
|
||||
like to query.
|
||||
|
||||
Under the **Sources** menu, select the _Tables_ option:
|
||||
|
||||
<img src="/images/tutorial_08_sources_tables.png" />
|
||||
|
||||
On the resulting page, click on the green plus sign, near the top left:
|
||||
|
||||
<img src="/images/tutorial_09_add_new_table.png" />
|
||||
|
||||
You only need a few pieces of information to add a new table to Superset:
|
||||
|
||||
- The name of the table
|
||||
|
||||
<img src="/images/tutorial_10_table_name.png" />
|
||||
|
||||
- The target database from the **Database** drop-down menu (i.e. the one you just added above)
|
||||
|
||||
<img src="/images/tutorial_11_choose_db.png" />
|
||||
|
||||
- Optionally, the database schema. If the table exists in the “default” schema (e.g. the public
|
||||
schema in PostgreSQL or Redshift), you can leave the schema field blank.
|
||||
|
||||
Click on the **Save** button to save the configuration:
|
||||
|
||||
<img src="/images/tutorial_07_save_button.png" />
|
||||
|
||||
When redirected back to the list of tables, you should see a message indicating that your table was
|
||||
created:
|
||||
|
||||
<img src="/images/tutorial_12_table_creation_success_msg.png" />
|
||||
|
||||
This message also directs you to edit the table configuration. We’ll edit a limited portion of the
|
||||
configuration now - just to get you started - and leave the rest for a more advanced tutorial.
|
||||
|
||||
Click on the edit button next to the table you’ve created:
|
||||
|
||||
<img src="/images/tutorial_13_edit_table_config.png" />
|
||||
|
||||
On the resulting page, click on the **List Table Column** tab. Here, you’ll define the way you can
|
||||
use specific columns of your table when exploring your data. We’ll run through these options to
|
||||
describe their purpose:
|
||||
|
||||
If you want users to group metrics by a specific field, mark it as **Groupable**.
|
||||
|
||||
If you need to filter on a specific field, mark it as **Filterable**.
|
||||
|
||||
Is this field something you’d like to get the distinct count of? Check the **Count Distinct** box.
|
||||
|
||||
Is this a metric you want to sum, or get basic summary statistics for? The **Sum, Min**, and **Max**
|
||||
columns will help.
|
||||
|
||||
The **is temporal** field should be checked for any date or time fields. We’ll cover how this
|
||||
manifests itself in analyses in a moment.
|
||||
|
||||
Here’s how we’ve configured fields for the weather data. Even for measures like the weather
|
||||
measurements (precipitation, snowfall, etc.), it’s ideal to group and filter by these values:
|
||||
|
||||
<img src="/images/tutorial_14_field_config.png" />
|
||||
|
||||
As with the configurations above, click the **Save** button to save these settings.
|
||||
|
||||
### Exploring your data
|
||||
|
||||
To start exploring your data, simply click on the table name you just created in the list of
|
||||
available tables:
|
||||
|
||||
<img src="/images/tutorial_15_click_table_name.png" />
|
||||
|
||||
By default, you’ll be presented with a Table View:
|
||||
|
||||
<img src="/images/tutorial_16_datasource_chart_type.png" />
|
||||
|
||||
Let’s walk through a basic query to get the count of all records in our table. First, we’ll need to
|
||||
change the **Since** filter to capture the range of our data. You can use simple phrases to apply
|
||||
these filters, like “3 years ago”:
|
||||
|
||||
<img src="/images/tutorial_17_choose_time_range.png" />
|
||||
|
||||
The upper limit for time, the **Until** filter, defaults to “now”, which may or may not be what you
|
||||
want. Look for the Metrics section under the **GROUP BY** header, and start typing “Count” - you’ll
|
||||
see a list of metrics matching what you type:
|
||||
|
||||
<img src="/images/tutorial_18_choose_metric.png" />
|
||||
|
||||
Select the _COUNT(\*)_ metric, then click the green **Query** button near the top of the explore:
|
||||
|
||||
<img src="/images/tutorial_19_click_query.png" />
|
||||
|
||||
You’ll see your results in the table:
|
||||
|
||||
<img src="/images/tutorial_20_count_star_result.png" />
|
||||
|
||||
Let’s group this by the weather_description field to get the count of records by the type of weather
|
||||
recorded by adding it to the Group by section:
|
||||
|
||||
<img src="/images/tutorial_21_group_by.png" />
|
||||
|
||||
and run the query:
|
||||
|
||||
<img src="/images/tutorial_22_group_by_result.png" />
|
||||
|
||||
Let’s find a more useful data point: the top 10 times and places that recorded the highest
|
||||
temperature in 2015. We replace weather_description with latitude, longitude and measurement_date in
|
||||
the **Group by** section:
|
||||
|
||||
<img src="/images/tutorial_23_group_by_more_dimensions.png" />
|
||||
|
||||
And replace _COUNT(\*)_ with _max\_\_measurement_flag_:
|
||||
|
||||
<img src="/images/tutorial_24_max_metric.png" />
|
||||
|
||||
The _max\_\_measurement_flag_ metric was created when we checked the box under **Max** and next to
|
||||
the _measurement_flag_ field, indicating that this field was numeric and that we wanted to find its
|
||||
maximum value when grouped by specific fields.
|
||||
|
||||
In our case, _measurement_flag_ is the value of the measurement taken, which clearly depends on the
|
||||
type of measurement (the researchers recorded different values for precipitation and temperature).
|
||||
Therefore, we must filter our query only on records where the _weather_description_ is equal to
|
||||
“Maximum temperature”, which we do in the **Filters** section at the bottom of the explore:
|
||||
|
||||
<img src="/images/tutorial_25_max_temp_filter.png" />
|
||||
|
||||
Finally, since we only care about the top 10 measurements, we limit our results to 10 records using
|
||||
the Row _limit_ option under the **Options** header:
|
||||
|
||||
<img src="/images/tutorial_26_row_limit.png" />
|
||||
|
||||
We click **Query** and get the following results:
|
||||
|
||||
<img src="/images/tutorial_27_top_10_max_temps.png" />
|
||||
|
||||
In this dataset, the maximum temperature is recorded in tenths of a degree Celsius. The top value of
|
||||
1370, measured in the middle of Nevada, is equal to 137 C, or roughly 278 degrees F. It’s unlikely
|
||||
this value was correctly recorded. We’ve already been able to investigate some outliers with
|
||||
Superset, but this just scratches the surface of what we can do.
|
||||
|
||||
You may want to do a couple more things with this measure:
|
||||
|
||||
- The default formatting shows values like 1.37k, which may be difficult for some users to read.
|
||||
It’s likely you may want to see the full, comma-separated value. You can change the formatting of
|
||||
any measure by editing its config (**Edit Table Config > List Sql Metric > Edit Metric >
|
||||
D3Format**)
|
||||
|
||||
- Moreover, you may want to see the temperature measurements in plain degrees C, not tenths of a
|
||||
degree. Or you may want to convert the temperature to degrees Fahrenheit. You can change the SQL
|
||||
that gets executed against the database, baking the logic into the measure itself (**Edit Table
|
||||
Config > List Sql Metric > Edit Metric > SQL Expression**).
|
||||
|
||||
For now, though, let’s create a better visualization of these data and add it to a dashboard. We can
|
||||
change the Chart Type to “Distribution - Bar Chart”:
|
||||
|
||||
<img src="/images/tutorial_28_bar_chart.png" />
|
||||
|
||||
Our filter on Maximum temperature measurements was retained, but the query and formatting options
|
||||
are dependent on the chart type, so you’ll have to set the values again:
|
||||
|
||||
<img src="/images/tutorial_29_bar_chart_series_metrics.png" />
|
||||
|
||||
You should note the extensive formatting options for this chart: the ability to set axis labels,
|
||||
margins, ticks, etc. To make the data presentable to a broad audience, you’ll want to apply many of
|
||||
these to slices that end up in dashboards. For now, though, we run our query and get the following
|
||||
chart:
|
||||
|
||||
<img src="/images/tutorial_30_bar_chart_results.png" />
|
||||
|
||||
### Creating a slice and dashboard
|
||||
|
||||
This view might be interesting to researchers, so let’s save it. In Superset, a saved query is
|
||||
called a **Slice**.
|
||||
|
||||
To create a slice, click the **Save as** button near the top-left of the explore:
|
||||
|
||||
<img src="/images/tutorial_19_click_query.png" />
|
||||
|
||||
A popup should appear, asking you to name the slice, and optionally add it to a dashboard. Since we
|
||||
haven’t yet created any dashboards, we can create one and immediately add our slice to it. Let’s do
|
||||
it:
|
||||
|
||||
<img src="/images/tutorial_31_save_slice_to_dashboard.png" />
|
||||
|
||||
Click **Save**, which will direct you back to your original query. We see that our slice and
|
||||
dashboard were successfully created:
|
||||
|
||||
<img src="/images/tutorial_32_save_slice_confirmation.png" />
|
||||
|
||||
Let’s check out our new dashboard. We click on the **Dashboards** menu:
|
||||
|
||||
<img src="/images/tutorial_33_dashboard.png" />
|
||||
|
||||
and find the dashboard we just created:
|
||||
|
||||
<img src="/images/tutorial_34_weather_dashboard.png" />
|
||||
|
||||
Things seemed to have worked - our slice is here!
|
||||
|
||||
<img src="/images/tutorial_35_slice_on_dashboard.png" />
|
||||
|
||||
But it’s a bit smaller than we might like. Luckily, you can adjust the size of slices in a dashboard
|
||||
by clicking, holding and dragging the bottom-right corner to your desired dimensions:
|
||||
|
||||
<img src="/images/tutorial_36_adjust_dimensions.gif" />
|
||||
|
||||
After adjusting the size, you’ll be asked to click on the icon near the top-right of the dashboard
|
||||
to save the new configuration.
|
||||
|
||||
Congrats! You’ve successfully linked, analyzed, and visualized data in Superset. There are a wealth
|
||||
of other table configuration and visualization options, so please start exploring and creating
|
||||
slices and dashboards of your own.
|
||||
117
docs/src/pages/docs/Miscellaneous/importing-exporting.mdx
Normal file
@@ -0,0 +1,117 @@
|
||||
---
|
||||
name: Importing and Exporting Datasources
|
||||
menu: Miscellaneous
|
||||
route: /docs/miscellaneous/importing-exporting-datasources
|
||||
index: 2
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Importing and Exporting Datasources
|
||||
|
||||
The superset cli allows you to import and export datasources from and to YAML. Datasources include
|
||||
both databases and druid clusters. The data is expected to be organized in the following hierarchy:
|
||||
|
||||
```
|
||||
├──databases
|
||||
| ├──database_1
|
||||
| | ├──table_1
|
||||
| | | ├──columns
|
||||
| | | | ├──column_1
|
||||
| | | | ├──column_2
|
||||
| | | | └──... (more columns)
|
||||
| | | └──metrics
|
||||
| | | ├──metric_1
|
||||
| | | ├──metric_2
|
||||
| | | └──... (more metrics)
|
||||
| | └── ... (more tables)
|
||||
| └── ... (more databases)
|
||||
└──druid_clusters
|
||||
├──cluster_1
|
||||
| ├──datasource_1
|
||||
| | ├──columns
|
||||
| | | ├──column_1
|
||||
| | | ├──column_2
|
||||
| | | └──... (more columns)
|
||||
| | └──metrics
|
||||
| | ├──metric_1
|
||||
| | ├──metric_2
|
||||
| | └──... (more metrics)
|
||||
| └── ... (more datasources)
|
||||
└── ... (more clusters)
|
||||
```
|
||||
|
||||
### Exporting Datasources to YAML
|
||||
|
||||
You can print your current datasources to stdout by running:
|
||||
|
||||
```
|
||||
superset export_datasources
|
||||
```
|
||||
|
||||
To save your datasources to a file run:
|
||||
|
||||
```
|
||||
superset export_datasources -f <filename>
|
||||
```
|
||||
|
||||
By default, default (null) values will be omitted. Use the -d flag to include them. If you want back
|
||||
references to be included (e.g. a column to include the table id it belongs to) use the -b flag.
|
||||
|
||||
Alternatively, you can export datasources using the UI:
|
||||
|
||||
1. Open **Sources -> Databases** to export all tables associated to a single or multiple databases.
|
||||
(**Tables** for one or more tables, **Druid Clusters** for clusters, **Druid Datasources** for
|
||||
datasources)
|
||||
2. Select the items you would like to export.
|
||||
3. Click **Actions -> Export to YAML**
|
||||
4. If you want to import an item that you exported through the UI, you will need to nest it inside
|
||||
its parent element, e.g. a database needs to be nested under databases, and a table needs to be nested
|
||||
inside a database element.
|
||||
|
||||
In order to obtain an **exhaustive list of all fields** you can import using the YAML import run:
|
||||
|
||||
```
|
||||
superset export_datasource_schema
|
||||
```
|
||||
|
||||
As a reminder, you can use the `-b` flag to include back references.
|
||||
|
||||
### Importing Datasources from YAML
|
||||
|
||||
In order to import datasources from a YAML file(s), run:
|
||||
|
||||
```
|
||||
superset import_datasources -p <path or filename>
|
||||
```
|
||||
|
||||
If you supply a path all files ending with **yaml** or **yml** will be parsed. You can apply
|
||||
additional flags (e.g. to search the supplied path recursively):
|
||||
|
||||
```
|
||||
superset import_datasources -p <path> -r
|
||||
```
|
||||
|
||||
The sync flag **-s** takes parameters in order to sync the supplied elements with your file. Be
|
||||
careful this can delete the contents of your meta database. Example:
|
||||
|
||||
```
|
||||
superset import_datasources -p <path / filename> -s columns,metrics
|
||||
```
|
||||
|
||||
This will sync all metrics and columns for all datasources found in the `<path /filename>` in the
|
||||
Superset meta database. This means columns and metrics not specified in YAML will be deleted. If you
|
||||
would add tables to columns,metrics those would be synchronised as well.
|
||||
|
||||
If you don’t supply the sync flag (**-s**) importing will only add and update (override) fields.
|
||||
E.g. you can add a verbose_name to the column ds in the table random_time_series from the example
|
||||
datasets by saving the following YAML to file and then running the **import_datasources** command.
|
||||
|
||||
```
|
||||
databases:
|
||||
- database_name: main
|
||||
tables:
|
||||
- table_name: random_time_series
|
||||
columns:
|
||||
- column_name: ds
|
||||
verbose_name: datetime
|
||||
```
|
||||
94
docs/src/pages/docs/Miscellaneous/index.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
name: Country Map Tools
|
||||
menu: Miscellaneous
|
||||
route: /docs/miscellaneous/country-map-tools
|
||||
index: 1
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Country Map Tools
|
||||
|
||||
This tool is used in slices for visualizing numbers or strings by region, province, or department of
|
||||
your country. So, if you want to use this tool, you need the ISO 3166-2 code of the region, province, or
|
||||
department.
|
||||
|
||||
ISO 3166-2 is part of the ISO 3166 standard published by the International Organization for
|
||||
Standardization (ISO), and defines codes for identifying the principal subdivisions (e.g., provinces
|
||||
or states) of all countries coded in ISO 3166-1
|
||||
|
||||
The purpose of ISO 3166-2 is to establish an international standard of short and unique alphanumeric
|
||||
codes to represent the relevant administrative divisions and dependent territories of all countries
|
||||
in a more convenient and less ambiguous form than their full names. Each complete ISO 3166-2 code
|
||||
consists of two parts, separated by a hyphen:
|
||||
|
||||
The first part is the ISO 3166-1 alpha-2 code of the country; The second part is a string of up to
|
||||
three alphanumeric characters, which is usually obtained from national sources and stems from coding
|
||||
systems already in use in the country concerned, but may also be developed by the ISO itself.
|
||||
|
||||
We can apply these concepts to specify the state of Texas in the country of United States:
|
||||
|
||||
```
|
||||
US-TX
|
||||
```
|
||||
|
||||
### Included Codes
|
||||
|
||||
The ISO codes for the following countries are included in Superset:
|
||||
|
||||
- Belgium
|
||||
- China
|
||||
- Egypt
|
||||
- France
|
||||
- Germany
|
||||
- Japan
|
||||
- Liechtenstein
|
||||
- Morocco
|
||||
- Russia
|
||||
- Singapore
|
||||
- Spain
|
||||
- Switzerland
|
||||
- United Kingdom
|
||||
- Ukraine
|
||||
- USA
|
||||
|
||||
### Adding New Countries
|
||||
|
||||
To add a new country in country map tools, you need to follow the following steps:
|
||||
|
||||
- You need shapefiles which contain data of your map. You can get this file on this site:
|
||||
https://www.diva-gis.org/gdata
|
||||
- You need to add the ISO 3166-2 code in a column named ISO for all records in your file. It’s important
|
||||
because it’s the norm for mapping your data with the geojson file
|
||||
- You need to convert the shapefile to a geojson file. This can be done with the ogr2ogr tool:
|
||||
https://www.gdal.org/ogr2ogr.html
|
||||
- Put your geojson file in the following folder: superset-frontend/src/visualizations/CountryMap/countries
|
||||
with the following name: nameofyourcountries.geojson
|
||||
- You can reduce the size of the geojson file on this site: https://mapshaper.org/
|
||||
- Go to the file `superset-frontend/src/explore/controls.jsx`
|
||||
- Add your country in component ‘select_country’. Here's an example:
|
||||
|
||||
```
|
||||
type: 'SelectControl',
|
||||
label: 'Country Name Type',
|
||||
default: 'France',
|
||||
choices: [
|
||||
'Belgium',
|
||||
'Brazil',
|
||||
'China',
|
||||
'Egypt',
|
||||
'France',
|
||||
'Germany',
|
||||
'Italy',
|
||||
'Japan',
|
||||
'Korea',
|
||||
'Morocco',
|
||||
'Netherlands',
|
||||
'Russia',
|
||||
'Singapore',
|
||||
'Spain',
|
||||
'Uk',
|
||||
'Usa',
|
||||
].map(s => [s, s]),
|
||||
description: 'The name of country that Superset should display',
|
||||
},
|
||||
```
|
||||
25
docs/src/pages/docs/contributing-page.mdx
Normal file
@@ -0,0 +1,25 @@
|
||||
---
|
||||
name: Contribution Guide
|
||||
title: Contribution Guide
|
||||
route: /docs/contribution
|
||||
---
|
||||
|
||||
## Contributing to Superset
|
||||
|
||||
Superset is currently being incubated at
|
||||
[the Apache Software Foundation](https://www.apache.org/theapacheway/index.html). The core
|
||||
contributors (or committers) to Superset communicate primarily in the following channels (all of
|
||||
which you can join):
|
||||
|
||||
- [mailing list](https://lists.apache.org/list.html?dev@superset.apache.org)
|
||||
- [Apache Superset Slack community](https://apache-superset.slack.com/join/shared_invite/zt-g8lpruog-HeqpgYrwdfrD5OYhlU7hPQ#/)
|
||||
- [Github issues and PR's](https://github.com/apache/incubator-superset/issues)
|
||||
|
||||
If you're interested in contributing, we recommend reading the Community Contribution Guide
|
||||
[described in CONTRIBUTING.MD](https://github.com/apache/incubator-superset/blob/master/CONTRIBUTING.md)
|
||||
to get started. Here are some helpful links from that page:
|
||||
|
||||
- [Overview of types of contributions](https://github.com/apache/incubator-superset/blob/master/CONTRIBUTING.md#types-of-contributions)
|
||||
- [Pull request guidelines](https://github.com/apache/incubator-superset/blob/master/CONTRIBUTING.md#pull-request-guidelines)
|
||||
- [Managing Issues and PR's](https://github.com/apache/incubator-superset/blob/master/CONTRIBUTING.md#managing-issues-and-prs)
|
||||
- [Setting up local environment for development](https://github.com/apache/incubator-superset/blob/master/CONTRIBUTING.md#setup-local-environment-for-development)
|
||||
289
docs/src/pages/docs/frequently-asked-questions-page.mdx
Normal file
@@ -0,0 +1,289 @@
|
||||
---
|
||||
name: Frequently Asked Questions
|
||||
title: Frequently Asked Questions
|
||||
route: /docs/frequently-asked-questions
|
||||
---
|
||||
|
||||
## Frequently Asked Questions
|
||||
|
||||
### Can I join / query multiple tables at one time?
|
||||
|
||||
Not in the Explore or Visualization UI. A Superset SQLAlchemy datasource can only be a single table
|
||||
or a view.
|
||||
|
||||
When working with tables, the solution would be to materialize a table that contains all the fields
|
||||
needed for your analysis, most likely through some scheduled batch process.
|
||||
|
||||
A view is a simple logical layer that abstracts arbitrary SQL queries as a virtual table. This can
|
||||
allow you to join and union multiple tables, and to apply some transformation using arbitrary SQL
|
||||
expressions. The limitation there is your database performance as Superset effectively will run a
|
||||
query on top of your query (view). A good practice may be to limit yourself to joining your main
|
||||
large table to one or many small tables only, and avoid using _GROUP BY_ where possible as Superset
|
||||
will do its own _GROUP BY_ and doing the work twice might slow down performance.
|
||||
|
||||
Whether you use a table or a view, the important factor is whether your database is fast enough to
|
||||
serve it in an interactive fashion to provide a good user experience in Superset.
|
||||
|
||||
### How BIG can my datasource be?
|
||||
|
||||
It can be gigantic! Superset acts as a thin layer above your underlying databases or data engines.
|
||||
|
||||
As mentioned above, the main criteria is whether your database can execute queries and return
|
||||
results in a time frame that is acceptable to your users. Many distributed databases out there can
|
||||
execute queries that scan through terabytes in an interactive fashion.
|
||||
|
||||
### How do I create my own visualization?
|
||||
|
||||
We recommend reading the instructions in
|
||||
[Building Custom Viz Plugins](/docs/installation/building-custom-viz-plugins).
|
||||
|
||||
### Can I upload and visualize CSV data?
|
||||
|
||||
Absolutely! Read the instructions [here](/docs/creating-charts-dashboards/exploring-data) to learn
|
||||
how to enable and use CSV upload.
|
||||
|
||||
### Why are my queries timing out?
|
||||
|
||||
There are many reasons that may cause a long query to time out.
|
||||
|
||||
When running a long query from SQL Lab, by default Superset allows it to run as long as 6 hours before it
|
||||
is killed by Celery. If you want to increase the time limit for a running query, you can specify the
|
||||
timeout in configuration. For example:
|
||||
|
||||
```
|
||||
SQLLAB_ASYNC_TIME_LIMIT_SEC = 60 * 60 * 6
|
||||
```
|
||||
|
||||
Superset is running on gunicorn web server, which may time out web requests. If you want to increase
|
||||
the default (50), you can specify the timeout when starting the web server with the -t flag, which
|
||||
is expressed in seconds.
|
||||
|
||||
```
|
||||
superset runserver -t 300
|
||||
```
|
||||
|
||||
If you are seeing timeouts (504 Gateway Time-out) when loading dashboard or explore slice, you are
|
||||
probably behind gateway or proxy server (such as Nginx). If it did not receive a timely response
|
||||
from Superset server (which is processing long queries), these web servers will send 504 status code
|
||||
to clients directly. Superset has a client-side timeout limit to address this issue. If query didn’t
|
||||
come back within the client-side timeout (60 seconds by default), Superset will display a warning message
|
||||
to avoid gateway timeout message. If you have a longer gateway timeout limit, you can change the
|
||||
timeout settings in **superset_config.py**:
|
||||
|
||||
```
|
||||
SUPERSET_WEBSERVER_TIMEOUT = 60
|
||||
```
|
||||
|
||||
### Why is the map not visible in the geospatial visualization?
|
||||
|
||||
You need to register a free account at [Mapbox.com](https://www.mapbox.com), obtain an API key, and add it
|
||||
to **superset_config.py** at the key MAPBOX_API_KEY:
|
||||
|
||||
```
|
||||
MAPBOX_API_KEY = "longstringofalphanumer1c"
|
||||
```
|
||||
|
||||
### How to add dynamic filters to a dashboard?
|
||||
|
||||
Use the **Filter Box** widget, build a slice, and add it to your dashboard.
|
||||
|
||||
The **Filter Box** widget allows you to define a query to populate dropdowns that can be used for
|
||||
filtering. To build the list of distinct values, we run a query, and sort the result by the metric
|
||||
you provide, sorting descending.
|
||||
|
||||
The widget also has a checkbox **Date Filter**, which enables time filtering capabilities to your
|
||||
dashboard. After checking the box and refreshing, you’ll see a from and a to dropdown show up.
|
||||
|
||||
By default, the filtering will be applied to all the slices that are built on top of a datasource
|
||||
that shares the column name that the filter is based on. It’s also a requirement for that column to
|
||||
be checked as “filterable” in the column tab of the table editor.
|
||||
|
||||
But what about if you don’t want certain widgets to get filtered on your dashboard? You can do that
|
||||
by editing your dashboard, and in the form, edit the JSON Metadata field, more specifically the
|
||||
`filter_immune_slices` key, that receives an array of sliceIds that should never be affected by any
|
||||
dashboard level filtering.
|
||||
|
||||
```
|
||||
{
|
||||
"filter_immune_slices": [324, 65, 92],
|
||||
"expanded_slices": {},
|
||||
"filter_immune_slice_fields": {
|
||||
"177": ["country_name", "__time_range"],
|
||||
"32": ["__time_range"]
|
||||
},
|
||||
"timed_refresh_immune_slices": [324]
|
||||
}
|
||||
```
|
||||
|
||||
In the json blob above, slices 324, 65 and 92 won’t be affected by any dashboard level filtering.
|
||||
|
||||
Now note the `filter_immune_slice_fields` key. This one allows you to be more specific and define
|
||||
for a specific slice_id, which filter fields should be disregarded.
|
||||
|
||||
Note the use of the `__time_range` keyword, which is reserved for dealing with the time boundary
|
||||
filtering mentioned above.
|
||||
|
||||
But what happens with filtering when dealing with slices coming from different tables or databases?
|
||||
If the column name is shared, the filter will be applied, it’s as simple as that.
|
||||
|
||||
### How to limit the timed refresh on a dashboard?
|
||||
|
||||
By default, the dashboard timed refresh feature allows you to automatically re-query every slice on
|
||||
a dashboard according to a set schedule. Sometimes, however, you won’t want all of the slices to be
|
||||
refreshed - especially if some data is slow moving, or run heavy queries. To exclude specific slices
|
||||
from the timed refresh process, add the `timed_refresh_immune_slices` key to the dashboard JSON
|
||||
Metadata field:
|
||||
|
||||
```
|
||||
{
|
||||
"filter_immune_slices": [],
|
||||
"expanded_slices": {},
|
||||
"filter_immune_slice_fields": {},
|
||||
"timed_refresh_immune_slices": [324]
|
||||
}
|
||||
```
|
||||
|
||||
In the example above, if a timed refresh is set for the dashboard, then every slice except 324 will
|
||||
be automatically re-queried on schedule.
|
||||
|
||||
Slice refresh will also be staggered over the specified period. You can turn off this staggering by
|
||||
setting the `stagger_refresh` to false and modify the stagger period by setting `stagger_time` to a
|
||||
value in milliseconds in the JSON Metadata field:
|
||||
|
||||
```
|
||||
{
|
||||
"stagger_refresh": false,
|
||||
"stagger_time": 2500
|
||||
}
|
||||
```
|
||||
|
||||
Here, the entire dashboard will refresh at once if periodic refresh is on. The stagger time of 2.5
|
||||
seconds is ignored.
|
||||
|
||||
**Why does ‘flask fab’ or superset freeze/hang/stop responding when started (my home directory is
|
||||
NFS mounted)?**
|
||||
|
||||
By default, Superset creates and uses an SQLite database at `~/.superset/superset.db`. SQLite is
|
||||
known to [not work well if used on NFS](https://www.sqlite.org/lockingv3.html) due to broken file
|
||||
locking implementation on NFS.
|
||||
|
||||
You can override this path using the **SUPERSET_HOME** environment variable.
|
||||
|
||||
Another workaround is to change where superset stores the sqlite database by adding the following in
|
||||
`superset_config.py`:
|
||||
|
||||
```
|
||||
SQLALCHEMY_DATABASE_URI = 'sqlite:////new/location/superset.db'
|
||||
```
|
||||
|
||||
You can read more about customizing Superset using the configuration file
|
||||
[here](/docs/installation/configuring-superset).
|
||||
|
||||
### What if the table schema changed?
|
||||
|
||||
Table schemas evolve, and Superset needs to reflect that. It’s pretty common in the life cycle of a
|
||||
dashboard to want to add a new dimension or metric. To get Superset to discover your new columns,
|
||||
all you have to do is to go to **Menu -> Sources -> Tables**, click the edit icon next to the table
|
||||
whose schema has changed, and hit **Save** from the **Detail** tab. Behind the scenes, the new
|
||||
columns will get merged in. Following this, you may want to re-edit the table afterwards to
|
||||
configure the Column tab, check the appropriate boxes and save again.
|
||||
|
||||
### What database engine can I use as a backend for Superset?
|
||||
|
||||
To clarify, the database backend is an OLTP database used by Superset to store its internal
|
||||
information like your list of users, slices and dashboard definitions.
|
||||
|
||||
Superset is tested using Mysql, Postgresql and Sqlite for its backend. It’s recommended you install
|
||||
Superset on one of these database server for production.
|
||||
|
||||
Using a column-store, non-OLTP databases like Vertica, Redshift or Presto as a database backend
|
||||
simply won’t work as these databases are not designed for this type of workload. Installation on
|
||||
Oracle, Microsoft SQL Server, or other OLTP databases may work but isn’t tested.
|
||||
|
||||
Please note that pretty much any databases that have a SqlAlchemy integration should work perfectly
|
||||
fine as a datasource for Superset, just not as the OLTP backend.
|
||||
|
||||
### How can I configure OAuth authentication and authorization?
|
||||
|
||||
You can take a look at this Flask-AppBuilder
|
||||
[configuration example](https://github.com/dpgaspar/Flask-AppBuilder/blob/master/examples/oauth/config.py).
|
||||
|
||||
### How can I set a default filter on my dashboard?
|
||||
|
||||
Simply apply the filter and save the dashboard while the filter is active.
|
||||
|
||||
### How do I get Superset to refresh the schema of my table?
|
||||
|
||||
When adding columns to a table, you can have Superset detect and merge the new columns in by using
|
||||
the “Refresh Metadata” action in the **Source -> Tables** page. Simply check the box next to the
|
||||
tables you want the schema refreshed, and click **Actions -> Refresh Metadata**.
|
||||
|
||||
### Is there a way to force the use specific colors?
|
||||
|
||||
It is possible on a per-dashboard basis by providing a mapping of labels to colors in the JSON
|
||||
Metadata attribute using the `label_colors` key.
|
||||
|
||||
```
|
||||
{
|
||||
"label_colors": {
|
||||
"Girls": "#FF69B4",
|
||||
"Boys": "#ADD8E6"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Does Superset work with [insert database engine here]?
|
||||
|
||||
The [Connecting to Databases section](/docs/databases/installing-database-drivers) provides the best
|
||||
overview for supported databases. Database engines not listed on that page may work too. We rely on
|
||||
the community to contribute to this knowledge base.
|
||||
|
||||
For a database engine to be supported in Superset through the SQLAlchemy connector, it requires
|
||||
having a Python compliant [SQLAlchemy dialect](https://docs.sqlalchemy.org/en/13/dialects/) as well
|
||||
as a [DBAPI driver](https://www.python.org/dev/peps/pep-0249/) defined. Database that have limited
|
||||
SQL support may work as well. For instance it’s possible to connect to Druid through the SQLAlchemy
|
||||
connector even though Druid does not support joins and subqueries. Another key element for a
|
||||
database to be supported is through the Superset Database Engine Specification interface. This
|
||||
interface allows for defining database-specific configurations and logic that go beyond the
|
||||
SQLAlchemy and DBAPI scope. This includes features like:
|
||||
|
||||
- date-related SQL function that allow Superset to fetch different time granularities when running
|
||||
time-series queries
|
||||
- whether the engine supports subqueries. If false, Superset may run 2-phase queries to compensate
|
||||
for the limitation
|
||||
- methods around processing logs and inferring the percentage of completion of a query
|
||||
- technicalities as to how to handle cursors and connections if the driver is not standard DBAPI
|
||||
|
||||
Beyond the SQLAlchemy connector, it’s also possible, though much more involved, to extend Superset
|
||||
and write your own connector. The only example of this at the moment is the Druid connector, which
|
||||
is getting superseded by Druid’s growing SQL support and the recent availability of a DBAPI and
|
||||
SQLAlchemy driver. If the database you are considering integrating has any kind of SQL support,
|
||||
it’s probably preferable to go the SQLAlchemy route. Note that for a native connector to be possible
|
||||
the database needs to have support for running OLAP-type queries and should be able to do things that
|
||||
are typical in basic SQL:
|
||||
|
||||
- aggregate data
|
||||
- apply filters
|
||||
- apply HAVING-type filters
|
||||
- be schema-aware, expose columns and types
|
||||
|
||||
### Does Superset offer a public API?
|
||||
|
||||
Yes, a public REST API, and the surface of that formal API is expanding steadily. Some of the
|
||||
original vision for the collection of endpoints under **/api/v1** was originally specified in
|
||||
[SIP-17](https://github.com/apache/incubator-superset/issues/7259) and constant progress has been
|
||||
made to cover more and more use cases.
|
||||
|
||||
The API available is documented using [Swagger](https://swagger.io/) and the documentation can be
|
||||
made available under **/swagger/v1** by enabling the following flag in `superset_config.py`:
|
||||
|
||||
```
|
||||
FAB_API_SWAGGER_UI = True
|
||||
```
|
||||
|
||||
There are other undocumented [private] ways to interact with Superset programmatically that offer no
|
||||
guarantees and are not recommended but may fit your use case temporarily:
|
||||
|
||||
- using the ORM (SQLAlchemy) directly
|
||||
- using the internal FAB ModelView API (to be deprecated in Superset)
|
||||
- altering the source code in your fork
|
||||
118
docs/src/pages/docs/installation/async_queries_celery.mdx
Normal file
@@ -0,0 +1,118 @@
|
||||
---
|
||||
name: Async Queries via Celery
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/async-queries-celery
|
||||
index: 9
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Async Queries via Celery
|
||||
|
||||
### Celery
|
||||
|
||||
On large analytic databases, it’s common to run queries that execute for minutes or hours. To enable
|
||||
support for long running queries that execute beyond the typical web request’s timeout (30-60
|
||||
seconds), it is necessary to configure an asynchronous backend for Superset which consists of:
|
||||
|
||||
- one or many Superset workers (which is implemented as a Celery worker), and can be started with
|
||||
the `celery worker` command, run `celery worker --help` to view the related options.
|
||||
- a celery broker (message queue) for which we recommend using Redis or RabbitMQ
|
||||
- a results backend that defines where the worker will persist the query results
|
||||
|
||||
Configuring Celery requires defining a `CELERY_CONFIG` in your `superset_config.py`. Both the worker
|
||||
and web server processes should have the same configuration.
|
||||
|
||||
```python
|
||||
class CeleryConfig(object):
|
||||
BROKER_URL = 'redis://localhost:6379/0'
|
||||
CELERY_IMPORTS = (
|
||||
'superset.sql_lab',
|
||||
'superset.tasks',
|
||||
)
|
||||
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
|
||||
CELERYD_LOG_LEVEL = 'DEBUG'
|
||||
CELERYD_PREFETCH_MULTIPLIER = 10
|
||||
CELERY_ACKS_LATE = True
|
||||
CELERY_ANNOTATIONS = {
|
||||
'sql_lab.get_sql_results': {
|
||||
'rate_limit': '100/s',
|
||||
},
|
||||
'email_reports.send': {
|
||||
'rate_limit': '1/s',
|
||||
'time_limit': 120,
|
||||
'soft_time_limit': 150,
|
||||
'ignore_result': True,
|
||||
},
|
||||
}
|
||||
CELERYBEAT_SCHEDULE = {
|
||||
'email_reports.schedule_hourly': {
|
||||
'task': 'email_reports.schedule_hourly',
|
||||
'schedule': crontab(minute=1, hour='*'),
|
||||
},
|
||||
}
|
||||
|
||||
CELERY_CONFIG = CeleryConfig
|
||||
```
|
||||
|
||||
To start a Celery worker to leverage the configuration, run the following command:
|
||||
|
||||
```
|
||||
celery worker --app=superset.tasks.celery_app:app --pool=prefork -O fair -c 4
|
||||
```
|
||||
|
||||
To start a job which schedules periodic background jobs, run the following command:
|
||||
|
||||
```
|
||||
celery beat --app=superset.tasks.celery_app:app
|
||||
```
|
||||
|
||||
To set up a result backend, you need to pass an instance of a derivative of
|
||||
cachelib.base.BaseCache to the RESULTS_BACKEND configuration key in your superset_config.py. You can
|
||||
use Memcached, Redis, S3 (https://pypi.python.org/pypi/s3werkzeugcache), memory or the file system
|
||||
(in a single server-type setup or for testing), or you can write your own caching interface. Your
|
||||
`superset_config.py` may look something like:
|
||||
|
||||
```python
|
||||
# On S3
|
||||
from s3cache.s3cache import S3Cache
|
||||
S3_CACHE_BUCKET = 'foobar-superset'
|
||||
S3_CACHE_KEY_PREFIX = 'sql_lab_result'
|
||||
RESULTS_BACKEND = S3Cache(S3_CACHE_BUCKET, S3_CACHE_KEY_PREFIX)
|
||||
|
||||
# On Redis
|
||||
from cachelib.redis import RedisCache
|
||||
RESULTS_BACKEND = RedisCache(
|
||||
host='localhost', port=6379, key_prefix='superset_results')
|
||||
```
|
||||
|
||||
For performance gains, [MessagePack](https://github.com/msgpack/msgpack-python) and
|
||||
[PyArrow](https://arrow.apache.org/docs/python/) are now used for results serialization. This can be
|
||||
disabled by setting `RESULTS_BACKEND_USE_MSGPACK = False` in your `superset_config.py`, should any
|
||||
issues arise. Please clear your existing results cache store when upgrading an existing environment.
|
||||
|
||||
**Important Notes**
|
||||
|
||||
- It is important that all the worker nodes and web servers in the Superset cluster _share a common
|
||||
metadata database_. This means that SQLite will not work in this context since it has limited
|
||||
support for concurrency and typically lives on the local file system.
|
||||
|
||||
- There should _only be one instance of celery beat running_ in your entire setup. If not,
|
||||
background jobs can get scheduled multiple times resulting in weird behaviors like duplicate
|
||||
delivery of reports, higher than expected load / traffic etc.
|
||||
|
||||
- SQL Lab will _only run your queries asynchronously if_ you enable **Asynchronous Query Execution**
|
||||
in your database settings (Sources > Databases > Edit record).
|
||||
|
||||
### Celery Flower
|
||||
|
||||
Flower is a web based tool for monitoring the Celery cluster which you can install from pip:
|
||||
|
||||
```python
|
||||
pip install flower
|
||||
```
|
||||
|
||||
You can run flower using:
|
||||
|
||||
```
|
||||
celery flower --app=superset.tasks.celery_app:app
|
||||
```
|
||||
209
docs/src/pages/docs/installation/building-viz-plugins.mdx
Normal file
@@ -0,0 +1,209 @@
|
||||
---
|
||||
name: Building Custom Viz Plugins
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/building-custom-viz-plugins
|
||||
index: 11
|
||||
version: 1
|
||||
---
|
||||
|
||||
This is a tutorial to help you build a "Hello World" viz plugin. The intent is to provide a basic
|
||||
scaffolding to build any sort of data visualization, using any viz library you'd like (e.g. ECharts,
|
||||
AntV, HighCharts, VX, and D3.).
|
||||
|
||||
You can build the Hello World plugin by running a [Yeoman](https://yeoman.io/) generator, which
|
||||
takes a few simple options, and provides this plugin scaffolding.
|
||||
|
||||
## Getting Set Up
|
||||
|
||||
### Install Yeoman and the Superset Package Generator
|
||||
|
||||
This Hello World plugin we'll be building is generated automatically with
|
||||
[Yeoman](https://yeoman.io/). Let's first get that installed by opening up a terminal and installing
|
||||
both the `yo` module and the
|
||||
[superset package generator](https://github.com/apache-superset/superset-ui/tree/master/packages/generator-superset)
|
||||
(`v0.14.7`) to create the new plugin.
|
||||
|
||||
```
|
||||
npm install -g yo @superset-ui/generator-superset
|
||||
```
|
||||
|
||||
### Install Superset
|
||||
|
||||
There are
|
||||
[complete instructions](https://github.com/apache/incubator-superset#installation-and-configuration)
|
||||
available on the [Superset Github repository](https://github.com/apache/incubator-superset). In a
|
||||
nutshell, the easiest way is to:
|
||||
|
||||
1. Have a Mac or linux-based machine
|
||||
2. Install [Docker](https://docs.docker.com/get-docker/)
|
||||
3. Clone [the repository](https://github.com/apache/incubator-superset) to your computer
|
||||
4. Use your terminal to `cd` into the `incubator-superset` directory
|
||||
5. Run `docker-compose up`
|
||||
6. Open _another_ terminal, and `cd` into `incubator-superset/superset-frontend`
|
||||
7. Run `npm install` to load up all the npm packages.
|
||||
8. Run `npm run dev-server` to spin up the Webpack hot-reloading server
|
||||
9. Wait for it to build, and then open your browser to `http://localhost:9000` and log in with
|
||||
`admin`/`admin`. You're off to the races! (Note: we'll be restarting this later)
|
||||
|
||||
### Install Superset-UI
|
||||
|
||||
1. Clone [the `superset-ui` repository](https://github.com/apache-superset/superset-ui) to your
|
||||
computer. It can sit in the same parent directory as your `incubator-superset` repo
|
||||
2. Use your terminal to `cd` into `superset-ui`
|
||||
3. Run `yarn install` and wait for all the packages to get installed
|
||||
|
||||
## Build Your "Hello, World"
|
||||
|
||||
### ~~Write~~ _generate_ some code!
|
||||
|
||||
1. Using your terminal, `cd` into your local `superset-ui` repo folder and then into the `plugins`
|
||||
subdirectory.
|
||||
2. Make a new directory for your plugin, i.e. `mkdir plugin-chart-hello-world`. **Note:** we
|
||||
_highly_ recommend following the `plugin-chart-your-plugin-name` pattern.
|
||||
3. Now `cd plugin-chart-hello-world`
|
||||
4. Finally, run `yo @superset-ui/superset`
|
||||
5. Select `Create superset-ui chart plugin package` on the following screen:
|
||||
|
||||
<img src="/images/plugin-1-yeoman-select.png" />{' '}
|
||||
|
||||
6. Give it a name (in our case, go with the default, based on the folder name):
|
||||
|
||||
<img src="/images/plugin-2-yeoman-package-name.png" />
|
||||
|
||||
7. Give it a description (again, default is fine!)
|
||||
|
||||
<img src="/images/plugin-3-yeoman-description.png" />{' '}
|
||||
|
||||
8. Choose which type of React component you want to make (Class, or Function component).
|
||||
|
||||
<img src="/images/plugin-4-yeoman-component-type.png" />{' '}
|
||||
|
||||
9. Select whether you'd like your visualization to be timeseries-based or not
|
||||
|
||||
<img src="/images/plugin-5-yeoman-timeseries.png" />{' '}
|
||||
|
||||
10. Select whether or not you want to include badges at the top of your README file (really only
|
||||
needed if you intend to contribute your plugin to the `superset-ui` repo).
|
||||
|
||||
<img src="/images/plugin-6-yeoman-badges.png" />{' '}
|
||||
|
||||
11. Admire all the files the generator has created for you. Note that EACH of these is chock full of
|
||||
comments about what they're for, and how best to use them.
|
||||
|
||||
<img src="/images/plugin-7-yeoman-files.png" />{' '}
|
||||
|
||||
### Add your Plugin to Superset (with NPM Link)
|
||||
|
||||
Now, we want to see this thing actually RUN! To do that, we'll add your package to Superset and
|
||||
embrace the magic power of `npm link` to see it in-situ, without needing to **build** the plugin, or
|
||||
open any PRs on Github.
|
||||
|
||||
1. Add your package to the `package.json` file in `incubator-superset/superset-frontend`.
|
||||
|
||||
<img src="/images/plugin-8-package-json.png" />{' '}
|
||||
|
||||
Note: Do _not_ run `npm install`... explanation below.
|
||||
|
||||
2. Add your plugin to the `MainPreset.js` file (located in
|
||||
`incubator-superset/superset-frontend/src/visualizations/presets/MainPreset.js`) in two places,
|
||||
alongside the other plugins.
|
||||
|
||||
<img src="/images/plugin-9-mainpreset-import.png" />{' '}
|
||||
|
||||
{' '}
|
||||
<img src="/images/plugin-9-mainpreset-register.png" />
|
||||
|
||||
3. Open a terminal window to `incubator-superset/superset-frontend`. If you did the Install Superset
|
||||
steps above, you may still have webpack running there, and you can just stop it with `ctrl-c`.
|
||||
If not, just open a new window and/or `cd` to that directory path.
|
||||
|
||||
4) Use `npm link` to symlink plugin, using a relative path to `superset-ui` and your plugin folder,
|
||||
e.g. `npm link ../../superset-ui/plugins/plugin-chart-hello-world`.
|
||||
|
||||
5. Restart your webpack dev server with `npm run dev-server`. You'll know it worked if you see a
|
||||
line stating
|
||||
`[Superset Plugin] Use symlink source for @superset-ui/plugin-chart-hello-world @ ^0.0.0`.
|
||||
|
||||
**NOTE:** If/when you do an `npm install`, it erases the symlink generated by `npm link`, so you'll
|
||||
have to redo those steps.
|
||||
|
||||
**NOTE:** Dynamic import is a work in progress. We hope you won't even need to DO this soon. We'll
|
||||
be blogging again when that day comes, we assure you. In short, we have a goal to make editing
|
||||
`package.json` and `MainPreset.js` unnecessary, so all the code changes are made in ONE repo.
|
||||
|
||||
### See it with your own eyes!
|
||||
|
||||
You should now be able to go to the Explore view in your local Superset and add a new chart! You'll
|
||||
see your new plugin when you go to select your viz type.
|
||||
|
||||
<img src="/images/plugin-10-hello-thumbnail.png" />{' '}
|
||||
|
||||
Now you can load up some data, and you'll see it appear in the plugin!
|
||||
|
||||
<img src="/images/plugin-11-explore-view.png" />{' '}
|
||||
|
||||
The plugin also outputs three things to your browser's console:
|
||||
|
||||
- `formData`, a.k.a. everything sent into your viz from the controls
|
||||
- `props`, as output from the `transformProps` file for your plugin's consumption
|
||||
- The actual HTML element, which your plugin has hooks into for any necessary DOM manipulation
|
||||
|
||||
<img src="/images/plugin-12-console-logs.png" />{' '}
|
||||
|
||||
## Make it Your Own
|
||||
|
||||
Now you're free to run wild with your new plugin! Here are a few places to start digging in:
|
||||
|
||||
### Read the comments and docs
|
||||
|
||||
Take a look through the full file tree of the plugin. The Readme gives details for the job of each
|
||||
file. EACH of these files has been annotated with extensive comments of what the file is for, and
|
||||
the basics of what you can do with it.
|
||||
|
||||
### Take control!
|
||||
|
||||
The plugin includes a couple of example controls, but you can certainly continue to add as many as
|
||||
you need to. The comments/documentation within the controls file is a start, but we recommend
|
||||
looking at existing `superset-ui` plugins for more examples of how you can implement controls to
|
||||
enhance your queries, work with your data, and change your visualization's display.
|
||||
|
||||
### Build the perfect query
|
||||
|
||||
The `buildQuery` file where your plugin actually fetches data from the Superset backend. This file
|
||||
builds the query "context" for your plugin. For a simple plugin, this file needn't do much. There are
|
||||
a couple changes that need to be made for a timeseries plugin, thus the option in the Yeoman
|
||||
generator.
|
||||
|
||||
This file also allows you to add various post-processing operations, to have the Superset backend
|
||||
process your data in various ways (pivoting, etc), but that's a whole other topic we'll cover
|
||||
separately in the near future.
|
||||
|
||||
### Style with Emotion
|
||||
|
||||
Each of these methods lets you add custom CSS styles using Emotion 👩🎤(a CSS-in-JS approach) which
|
||||
has access to Superset's burgeoning set of theme variables, and also automatically scopes the styles
|
||||
to your plugin, so they don't "leak" to other areas of Superset.
|
||||
|
||||
In the Hello World plugin, we've included a few example Theme variables (`colors`, `gridUnit`s, and
|
||||
typographic weights/sizes). We'll be continuing to add more variables to this theme file as we
|
||||
continue to push Superset (and the viz plugins) toward the standards of the Superset redesign (see
|
||||
[SIP-34](https://github.com/apache/incubator-superset/issues/8976))
|
||||
|
||||
### Give it a thumbnail
|
||||
|
||||
Because come on... that's the fun part, right?
|
||||
|
||||
### Build it!
|
||||
|
||||
In this tutorial, you built your plugin in the `superset-ui` repo. This means you can use the
|
||||
built-in build scripts that the repo provides. With your terminal of choice, simply `cd` into the
|
||||
root directory of `superset-ui` and run `yarn build`. This will kick off a build of ALL the Superset
|
||||
plugins and packages, including yours.
|
||||
|
||||
### Test early, test often!
|
||||
|
||||
The Hello World plugin includes some basic Jest tests to act as a starting point to add unit tests
|
||||
to your plugin. These do a quick sanity check that the plugin actually loads correctly, and then run
|
||||
through the basics of making sure that your controls are properly respected by modifying the
|
||||
resulting data and/or props of the plugin. Running `yarn test` from the root directory of
|
||||
`superset-ui` will run all the tests for plugins/packages, including your Hello World.
|
||||
136
docs/src/pages/docs/installation/caching.mdx
Normal file
@@ -0,0 +1,136 @@
|
||||
---
|
||||
name: Caching
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/cache
|
||||
index: 5
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Caching
|
||||
|
||||
Superset uses [Flask-Cache](https://pythonhosted.org/Flask-Cache/) for caching purposes. Configuring
|
||||
your caching backend is as easy as providing a `CACHE_CONFIG` constant in your `superset_config.py`
|
||||
that complies with the Flask-Cache specifications.
|
||||
|
||||
Flask-Cache supports multiple caching backends (Redis, Memcached, SimpleCache (in-memory), or the
|
||||
local filesystem).
|
||||
|
||||
- Memcached: we recommend using [pylibmc](https://pypi.org/project/pylibmc/) client library as
|
||||
`python-memcached` does not handle storing binary data correctly.
|
||||
- Redis: we recommend the [redis](https://pypi.python.org/pypi/redis) Python package
|
||||
|
||||
Both of these libraries can be installed using pip.
|
||||
|
||||
For setting your timeouts, this is done in the Superset metadata and goes up the “timeout
|
||||
searchpath”, from your slice configuration, to your data source’s configuration, to your database’s
|
||||
and ultimately falls back into your global default defined in `CACHE_CONFIG`.
|
||||
|
||||
```
|
||||
CACHE_CONFIG = {
|
||||
'CACHE_TYPE': 'redis',
|
||||
'CACHE_DEFAULT_TIMEOUT': 60 * 60 * 24, # 1 day default (in secs)
|
||||
'CACHE_KEY_PREFIX': 'superset_results',
|
||||
'CACHE_REDIS_URL': 'redis://localhost:6379/0',
|
||||
}
|
||||
```
|
||||
|
||||
It is also possible to pass a custom cache initialization function in the config to handle
|
||||
additional caching use cases. The function must return an object that is compatible with the
|
||||
[Flask-Cache API](https://pythonhosted.org/Flask-Cache/).
|
||||
|
||||
```python
|
||||
from custom_caching import CustomCache
|
||||
|
||||
def init_cache(app):
|
||||
"""Takes an app instance and returns a custom cache backend"""
|
||||
config = {
|
||||
'CACHE_DEFAULT_TIMEOUT': 60 * 60 * 24, # 1 day default (in secs)
|
||||
'CACHE_KEY_PREFIX': 'superset_results',
|
||||
}
|
||||
return CustomCache(app, config)
|
||||
|
||||
CACHE_CONFIG = init_cache
|
||||
```
|
||||
|
||||
Superset has a Celery task that will periodically warm up the cache based on different strategies.
|
||||
To use it, add the following to the `CELERYBEAT_SCHEDULE` section in `config.py`:
|
||||
|
||||
```python
|
||||
CELERYBEAT_SCHEDULE = {
|
||||
'cache-warmup-hourly': {
|
||||
'task': 'cache-warmup',
|
||||
'schedule': crontab(minute=0, hour='*'), # hourly
|
||||
'kwargs': {
|
||||
'strategy_name': 'top_n_dashboards',
|
||||
'top_n': 5,
|
||||
'since': '7 days ago',
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
This will cache all the charts in the top 5 most popular dashboards every hour. For other
|
||||
strategies, check the `superset/tasks/cache.py` file.
|
||||
|
||||
### Caching Thumbnails
|
||||
|
||||
This is an optional feature that can be turned on by activating its feature flag on config:
|
||||
|
||||
```
|
||||
FEATURE_FLAGS = {
|
||||
"THUMBNAILS": True,
|
||||
"THUMBNAILS_SQLA_LISTENERS": True,
|
||||
}
|
||||
```
|
||||
|
||||
For this feature you will need a cache system and celery workers. All thumbnails are stored in the cache
|
||||
and are processed asynchronously by the workers.
|
||||
|
||||
An example config where images are stored on S3 could be:
|
||||
|
||||
```python
|
||||
from flask import Flask
|
||||
from s3cache.s3cache import S3Cache
|
||||
|
||||
...
|
||||
|
||||
class CeleryConfig(object):
|
||||
BROKER_URL = "redis://localhost:6379/0"
|
||||
CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks", "superset.tasks.thumbnails")
|
||||
CELERY_RESULT_BACKEND = "redis://localhost:6379/0"
|
||||
CELERYD_PREFETCH_MULTIPLIER = 10
|
||||
CELERY_ACKS_LATE = True
|
||||
|
||||
|
||||
CELERY_CONFIG = CeleryConfig
|
||||
|
||||
def init_thumbnail_cache(app: Flask) -> S3Cache:
|
||||
return S3Cache("bucket_name", 'thumbs_cache/')
|
||||
|
||||
|
||||
THUMBNAIL_CACHE_CONFIG = init_thumbnail_cache
|
||||
# Async selenium thumbnail task will use the following user
|
||||
THUMBNAIL_SELENIUM_USER = "Admin"
|
||||
```
|
||||
|
||||
Using the above example, cache keys for dashboards will be `superset_thumb__dashboard__{ID}`. You can
|
||||
override the base URL for selenium using:
|
||||
|
||||
```
|
||||
WEBDRIVER_BASEURL = "https://superset.company.com"
|
||||
```
|
||||
|
||||
Additional selenium web drive configuration can be set using `WEBDRIVER_CONFIGURATION`. You can
|
||||
implement a custom function to authenticate selenium. The default function uses the `flask-login`
|
||||
session cookie. Here's an example of a custom function signature:
|
||||
|
||||
```python
|
||||
def auth_driver(driver: WebDriver, user: "User") -> WebDriver:
|
||||
pass
|
||||
```
|
||||
|
||||
Then on configuration:
|
||||
|
||||
```
|
||||
WEBDRIVER_AUTH_FUNC = auth_driver
|
||||
```
|
||||
282
docs/src/pages/docs/installation/configuring.mdx
Normal file
@@ -0,0 +1,282 @@
|
||||
---
|
||||
name: Configuring Superset
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/configuring-superset
|
||||
index: 3
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Configuring Superset
|
||||
|
||||
### Configuration
|
||||
|
||||
To configure your application, you need to create a file `superset_config.py` and add it to your
|
||||
`PYTHONPATH`. Here are some of the parameters you can set in that file:
|
||||
|
||||
```
|
||||
# Superset specific config
|
||||
ROW_LIMIT = 5000
|
||||
|
||||
SUPERSET_WEBSERVER_PORT = 8088
|
||||
|
||||
# Flask App Builder configuration
|
||||
# Your App secret key
|
||||
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h'
|
||||
|
||||
# The SQLAlchemy connection string to your database backend
|
||||
# This connection defines the path to the database that stores your
|
||||
# superset metadata (slices, connections, tables, dashboards, ...).
|
||||
# Note that the connection information to connect to the datasources
|
||||
# you want to explore are managed directly in the web UI
|
||||
SQLALCHEMY_DATABASE_URI = 'sqlite:////path/to/superset.db'
|
||||
|
||||
# Flask-WTF flag for CSRF
|
||||
WTF_CSRF_ENABLED = True
|
||||
# Add endpoints that need to be exempt from CSRF protection
|
||||
WTF_CSRF_EXEMPT_LIST = []
|
||||
# A CSRF token that expires in 1 year
|
||||
WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 365
|
||||
|
||||
# Set this API key to enable Mapbox visualizations
|
||||
MAPBOX_API_KEY = ''
|
||||
```
|
||||
|
||||
All the parameters and default values defined in
|
||||
[https://github.com/apache/incubator-superset/blob/master/superset/config.py](https://github.com/apache/incubator-superset/blob/master/superset/config.py)
|
||||
can be altered in your local `superset_config.py`. Administrators will want to read through the file
|
||||
to understand what can be configured locally as well as the default values in place.
|
||||
|
||||
Since `superset_config.py` acts as a Flask configuration module, it can be used to alter the
|
||||
settings of Flask itself, as well as Flask extensions like `flask-wtf`, `flask-cache`, `flask-migrate`,
|
||||
and `flask-appbuilder`. Flask App Builder, the web framework used by Superset, offers many
|
||||
configuration settings. Please consult the
|
||||
[Flask App Builder Documentation](https://flask-appbuilder.readthedocs.org/en/latest/config.html)
|
||||
for more information on how to configure it.
|
||||
|
||||
Make sure to change:
|
||||
|
||||
- `SQLALCHEMY_DATABASE_URI`: by default it is stored at ~/.superset/superset.db
|
||||
- `SECRET_KEY`: to a long random string
|
||||
|
||||
If you need to exempt endpoints from CSRF (e.g. if you are running a custom auth postback endpoint),
|
||||
you can add the endpoints to `WTF_CSRF_EXEMPT_LIST`:
|
||||
|
||||
```
|
||||
WTF_CSRF_EXEMPT_LIST = ['']
|
||||
```
|
||||
|
||||
### Flask AppBuilder Permissions
|
||||
|
||||
By default, every time the Flask-AppBuilder (FAB) app is initialized the permissions and views are
|
||||
added automatically to the backend and associated with the ‘Admin’ role. The issue, however, is when
|
||||
you are running multiple concurrent workers this creates a lot of contention and race conditions
|
||||
when defining permissions and views.
|
||||
|
||||
To alleviate this issue, the automatic updating of permissions can be disabled by setting
|
||||
`FAB_UPDATE_PERMS = False` (defaults to True).
|
||||
|
||||
In a production environment initialization could take on the following form:
|
||||
|
||||
```
|
||||
superset init gunicorn -w 10 … superset:app
|
||||
```
|
||||
|
||||
### Running on a WSGI HTTP Server
|
||||
|
||||
While you can run Superset on NGINX or Apache, we recommend using Gunicorn in async mode. This
|
||||
enables impressive concurrency and is fairly easy to install and configure. Please refer to the
|
||||
documentation of your preferred technology to set up this Flask WSGI application in a way that works
|
||||
well in your environment. Here’s an async setup known to work well in production:
|
||||
|
||||
```
|
||||
-w 10 \
|
||||
-k gevent \
|
||||
--timeout 120 \
|
||||
-b 0.0.0.0:6666 \
|
||||
--limit-request-line 0 \
|
||||
--limit-request-field_size 0 \
|
||||
--statsd-host localhost:8125 \
|
||||
"superset.app:create_app()"
|
||||
```
|
||||
|
||||
Refer to the [Gunicorn documentation](https://docs.gunicorn.org/en/stable/design.html) for more
|
||||
information. _Note that the development web server (`superset run` or `flask run`) is not intended
|
||||
for production use._
|
||||
|
||||
If you're not using Gunicorn, you may want to disable the use of `flask-compress` by setting
|
||||
`COMPRESS_REGISTER = False` in your `superset_config.py`.
|
||||
|
||||
### Configuration Behind a Load Balancer
|
||||
|
||||
If you are running superset behind a load balancer or reverse proxy (e.g. NGINX or ELB on AWS), you
|
||||
may need to utilize a healthcheck endpoint so that your load balancer knows if your superset
|
||||
instance is running. This is provided at `/health` which will return a 200 response containing “OK”
|
||||
if the webserver is running.
|
||||
|
||||
If the load balancer is inserting `X-Forwarded-For/X-Forwarded-Proto` headers, you should set
|
||||
`ENABLE_PROXY_FIX = True` in the superset config file (`superset_config.py`) to extract and use the
|
||||
headers.
|
||||
|
||||
In case the reverse proxy is used for providing SSL encryption, an explicit definition of the
|
||||
`X-Forwarded-Proto` may be required. For the Apache webserver this can be set as follows:
|
||||
|
||||
```
|
||||
RequestHeader set X-Forwarded-Proto "https"
|
||||
```
|
||||
|
||||
### Custom OAuth2 Configuration
|
||||
|
||||
Beyond FAB supported providers (Github, Twitter, LinkedIn, Google, Azure, etc), it's easy to connect
|
||||
Superset with other OAuth2 Authorization Server implementations that support “code” authorization.
|
||||
|
||||
First, configure authorization in Superset `superset_config.py`.
|
||||
|
||||
```python
|
||||
AUTH_TYPE = AUTH_OAUTH
|
||||
OAUTH_PROVIDERS = [
|
||||
{ 'name':'egaSSO',
|
||||
'token_key':'access_token', # Name of the token in the response of access_token_url
|
||||
'icon':'fa-address-card', # Icon for the provider
|
||||
'remote_app': {
|
||||
'consumer_key':'myClientId', # Client Id (Identify Superset application)
|
||||
'consumer_secret':'MySecret', # Secret for this Client Id (Identify Superset application)
|
||||
'request_token_params':{
|
||||
'scope': 'read' # Scope for the Authorization
|
||||
},
|
||||
'access_token_method':'POST', # HTTP Method to call access_token_url
|
||||
'access_token_params':{ # Additional parameters for calls to access_token_url
|
||||
'client_id':'myClientId'
|
||||
},
|
||||
'access_token_headers':{ # Additional headers for calls to access_token_url
|
||||
'Authorization': 'Basic Base64EncodedClientIdAndSecret'
|
||||
},
|
||||
'base_url':'https://myAuthorizationServer/oauth2AuthorizationServer/',
|
||||
'access_token_url':'https://myAuthorizationServer/oauth2AuthorizationServer/token',
|
||||
'authorize_url':'https://myAuthorizationServer/oauth2AuthorizationServer/authorize'
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
# Will allow user self registration, allowing to create Flask users from Authorized User
|
||||
AUTH_USER_REGISTRATION = True
|
||||
|
||||
# The default user self registration role
|
||||
AUTH_USER_REGISTRATION_ROLE = "Public"
|
||||
```
|
||||
|
||||
Then, create a `CustomSsoSecurityManager` that extends `SupersetSecurityManager` and overrides
|
||||
`oauth_user_info`:
|
||||
|
||||
```python
|
||||
from superset.security import SupersetSecurityManager
|
||||
|
||||
class CustomSsoSecurityManager(SupersetSecurityManager):
|
||||
|
||||
def oauth_user_info(self, provider, response=None):
|
||||
logging.debug("Oauth2 provider: {0}.".format(provider))
|
||||
if provider == 'egaSSO':
|
||||
# As example, this line request a GET to base_url + '/' + userDetails with Bearer Authentication,
|
||||
# and expects that authorization server checks the token, and response with user details
|
||||
me = self.appbuilder.sm.oauth_remotes[provider].get('userDetails').data
|
||||
logging.debug("user_data: {0}".format(me))
|
||||
return { 'name' : me['name'], 'email' : me['email'], 'id' : me['user_name'], 'username' : me['user_name'], 'first_name':'', 'last_name':''}
|
||||
...
|
||||
```
|
||||
|
||||
This file must be located at the same directory than `superset_config.py` with the name
|
||||
`custom_sso_security_manager.py`. Finally, add the following 2 lines to `superset_config.py`:
|
||||
|
||||
```
|
||||
from custom_sso_security_manager import CustomSsoSecurityManager
|
||||
CUSTOM_SECURITY_MANAGER = CustomSsoSecurityManager
|
||||
```
|
||||
|
||||
### Feature Flags
|
||||
|
||||
To support a diverse set of users, Superset has some features that are not enabled by default. For
|
||||
example, some users have stronger security restrictions, while some others may not. So Superset
|
||||
allows users to enable or disable some features by config. For feature owners, you can add optional
|
||||
functionalities in Superset, but they will only be used by a subset of users.
|
||||
|
||||
You can enable or disable features with flag from `superset_config.py`:
|
||||
|
||||
```python
|
||||
DEFAULT_FEATURE_FLAGS = {
|
||||
'CLIENT_CACHE': False,
|
||||
'ENABLE_EXPLORE_JSON_CSRF_PROTECTION': False,
|
||||
'PRESTO_EXPAND_DATA': False,
|
||||
}
|
||||
```
|
||||
|
||||
Here is a list of flags and descriptions:
|
||||
|
||||
- `ENABLE_EXPLORE_JSON_CSRF_PROTECTION`: For some security concerns, you may need to enforce CSRF
|
||||
protection on all query request to the `explore_json` endpoint. When
|
||||
`ENABLE_EXPLORE_JSON_CSRF_PROTECTION` is set to true, your users cannot make GET request to
|
||||
`explore_json`. The default value for this feature is `False` and `explore_json` will accept both
|
||||
GET and POST request. See [PR 7935](https://github.com/apache/incubator-superset/pull/7935) for
|
||||
more details.
|
||||
|
||||
- `PRESTO_EXPAND_DATA`: When this feature is enabled, nested types in Presto will be expanded into
|
||||
extra columns and / or arrays. This is experimental, and doesn’t work with all nested types.
|
||||
|
||||
### SIP 15
|
||||
|
||||
[Superset Improvement Proposal 15](https://github.com/apache/incubator-superset/issues/6360) aims to
|
||||
ensure that time intervals are handled in a consistent and transparent manner for both the Druid and
|
||||
SQLAlchemy connectors.
|
||||
|
||||
Prior to SIP-15 SQLAlchemy used inclusive endpoints however these may behave like exclusive for
|
||||
string columns (due to lexicographical ordering) if no formatting was defined and the column
|
||||
formatting did not conform to an ISO 8601 date-time (refer to the SIP for details).
|
||||
|
||||
To remedy this, rather than having to define the date/time format for every non-ISO 8601 date-time
|
||||
column, one can define a default column mapping on a per database level via the `extra` parameter:
|
||||
|
||||
```
|
||||
{
|
||||
"python_date_format_by_column_name": {
|
||||
"ds": "%Y-%m-%d"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**New Deployments**
|
||||
|
||||
All new deployments should enable SIP-15 by setting this value in `superset_config.py`:
|
||||
|
||||
```
|
||||
SIP_15_ENABLED = True
|
||||
|
||||
```
|
||||
|
||||
**Existing Deployments**
|
||||
|
||||
Given that it is not apparent whether the chart creator was aware of the time range inconsistencies
|
||||
(and adjusted the endpoints accordingly) changing the behavior of all charts is overly aggressive.
|
||||
Instead SIP-15 provides a soft transition allowing producers (chart owners) to see the impact of
|
||||
the proposed change and adjust their charts accordingly.
|
||||
|
||||
Prior to enabling SIP-15, existing deployments should communicate to their users the impact of the
|
||||
change and define a grace period end date (exclusive of course) after which all charts will conform
|
||||
to the [start, end) interval.
|
||||
|
||||
```python
|
||||
from datetime import date
|
||||
|
||||
SIP_15_ENABLED = True
|
||||
SIP_15_GRACE_PERIOD_END = date(<YYYY>, <MM>, <DD>)
|
||||
```
|
||||
|
||||
To aid with transparency the current endpoint behavior is explicitly called out in the chart time
|
||||
range (post SIP-15 this will be [start, end) for all connectors and databases). One can override the
|
||||
defaults on a per database level via the `extra` parameter.
|
||||
|
||||
```python
|
||||
{
|
||||
"time_range_endpoints": ["inclusive", "inclusive"]
|
||||
}
|
||||
```
|
||||
|
||||
Note in a future release the interim SIP-15 logic will be removed (including the
|
||||
`time_range_endpoints` form-data field) via a code change and Alembic migration.
|
||||
174
docs/src/pages/docs/installation/email_reports.mdx
Normal file
@@ -0,0 +1,174 @@
|
||||
---
|
||||
name: Scheduling and Emailing Reports
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/email-reports
|
||||
index: 10
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Scheduling and Emailing Reports
|
||||
|
||||
### Email Reports
|
||||
|
||||
Email reports allow users to schedule email reports for:
|
||||
|
||||
- chart and dashboard visualization (attachment or inline)
|
||||
- chart data (CSV attachment or inline table)
|
||||
|
||||
Enable email reports in your `superset_config.py` file:
|
||||
|
||||
```python
|
||||
ENABLE_SCHEDULED_EMAIL_REPORTS = True
|
||||
```
|
||||
|
||||
Now you will find two new items in the navigation bar that allow you to schedule email reports:
|
||||
|
||||
- **Manage > Dashboard Emails**
|
||||
- **Manage > Chart Email Schedules**
|
||||
|
||||
Schedules are defined in [crontab format](https://crontab.guru/) and each schedule can have a list
|
||||
of recipients (all of them can receive a single mail, or separate mails). For audit purposes, all
|
||||
outgoing mails can have a mandatory BCC.
|
||||
|
||||
In order to get picked up, you need to configure a celery worker and a celery beat (see section above
|
||||
“Celery Tasks”). Your celery configuration also needs an entry `email_reports.schedule_hourly` for
|
||||
`CELERYBEAT_SCHEDULE`.
|
||||
|
||||
To send emails you need to configure SMTP settings in your `superset_config.py` configuration file.
|
||||
|
||||
```python
|
||||
EMAIL_NOTIFICATIONS = True
|
||||
|
||||
SMTP_HOST = "email-smtp.eu-west-1.amazonaws.com"
|
||||
SMTP_STARTTLS = True
|
||||
SMTP_SSL = False
|
||||
SMTP_USER = "smtp_username"
|
||||
SMTP_PORT = 25
|
||||
SMTP_PASSWORD = os.environ.get("SMTP_PASSWORD")
|
||||
SMTP_MAIL_FROM = "insights@komoot.com"
|
||||
```
|
||||
|
||||
To render dashboards you need to install a local browser on your Superset instance:
|
||||
|
||||
- [geckodriver](https://github.com/mozilla/geckodriver) for Firefox
|
||||
- [chromedriver](http://chromedriver.chromium.org/) for Chrome
|
||||
|
||||
You'll need to adjust the `EMAIL_REPORTS_WEBDRIVER` accordingly in your configuration. You also need
|
||||
to specify on behalf of which username to render the dashboards. In general dashboards and charts
|
||||
are not accessible to unauthorized requests, that is why the worker needs to take over credentials
|
||||
of an existing user to take a snapshot.
|
||||
|
||||
```python
|
||||
EMAIL_REPORTS_USER = 'username_with_permission_to_access_dashboards'
|
||||
```
|
||||
|
||||
**Important notes**
|
||||
|
||||
- Be mindful of the concurrency setting for celery (using `-c 4`). Selenium/webdriver instances can
|
||||
consume a lot of CPU / memory on your servers.
|
||||
- In some cases, if you notice a lot of leaked geckodriver processes, try running your celery
|
||||
processes with `celery worker --pool=prefork --max-tasks-per-child=128 ...`
|
||||
- It is recommended to run separate workers for the `sql_lab` and `email_reports` tasks. This can be
|
||||
done using the `queue` field in `CELERY_ANNOTATIONS`.
|
||||
- Adjust `WEBDRIVER_BASEURL` in your configuration file if celery workers can’t access Superset via
|
||||
its default value of `http://0.0.0.0:8080/`.
|
||||
|
||||
### Schedule Reports
|
||||
|
||||
You can optionally allow your users to schedule queries directly in SQL Lab. This is done by adding
|
||||
extra metadata to saved queries, which are then picked up by an external scheduler (like
|
||||
[Apache Airflow](https://airflow.apache.org/)).
|
||||
|
||||
To allow scheduled queries, add the following to your configuration file:
|
||||
|
||||
```python
|
||||
FEATURE_FLAGS = {
|
||||
# Configuration for scheduling queries from SQL Lab. This information is
|
||||
# collected when the user clicks "Schedule query", and saved into the `extra`
|
||||
# field of saved queries.
|
||||
# See: https://github.com/mozilla-services/react-jsonschema-form
|
||||
'SCHEDULED_QUERIES': {
|
||||
'JSONSCHEMA': {
|
||||
'title': 'Schedule',
|
||||
'description': (
|
||||
'In order to schedule a query, you need to specify when it '
|
||||
'should start running, when it should stop running, and how '
|
||||
'often it should run. You can also optionally specify '
|
||||
'dependencies that should be met before the query is '
|
||||
'executed. Please read the documentation for best practices '
|
||||
'and more information on how to specify dependencies.'
|
||||
),
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'output_table': {
|
||||
'type': 'string',
|
||||
'title': 'Output table name',
|
||||
},
|
||||
'start_date': {
|
||||
'type': 'string',
|
||||
'title': 'Start date',
|
||||
# date-time is parsed using the chrono library, see
|
||||
# https://www.npmjs.com/package/chrono-node#usage
|
||||
'format': 'date-time',
|
||||
'default': 'tomorrow at 9am',
|
||||
},
|
||||
'end_date': {
|
||||
'type': 'string',
|
||||
'title': 'End date',
|
||||
# date-time is parsed using the chrono library, see
|
||||
# https://www.npmjs.com/package/chrono-node#usage
|
||||
'format': 'date-time',
|
||||
'default': '9am in 30 days',
|
||||
},
|
||||
'schedule_interval': {
|
||||
'type': 'string',
|
||||
'title': 'Schedule interval',
|
||||
},
|
||||
'dependencies': {
|
||||
'type': 'array',
|
||||
'title': 'Dependencies',
|
||||
'items': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'UISCHEMA': {
|
||||
'schedule_interval': {
|
||||
'ui:placeholder': '@daily, @weekly, etc.',
|
||||
},
|
||||
'dependencies': {
|
||||
'ui:help': (
|
||||
'Check the documentation for the correct format when '
|
||||
'defining dependencies.'
|
||||
),
|
||||
},
|
||||
},
|
||||
'VALIDATION': [
|
||||
# ensure that start_date <= end_date
|
||||
{
|
||||
'name': 'less_equal',
|
||||
'arguments': ['start_date', 'end_date'],
|
||||
'message': 'End date cannot be before start date',
|
||||
# this is where the error message is shown
|
||||
'container': 'end_date',
|
||||
},
|
||||
],
|
||||
# link to the scheduler; this example links to an Airflow pipeline
|
||||
# that uses the query id and the output table as its name
|
||||
'linkback': (
|
||||
'https://airflow.example.com/admin/airflow/tree?'
|
||||
'dag_id=query_${id}_${extra_json.schedule_info.output_table}'
|
||||
),
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
This feature flag is based on
|
||||
[react-jsonschema-form](https://github.com/mozilla-services/react-jsonschema-form) and will add a
|
||||
button called “Schedule Query” to SQL Lab. When the button is clicked, a modal will show up where
|
||||
the user can add the metadata required for scheduling the query.
|
||||
|
||||
This information can then be retrieved from the endpoint `/savedqueryviewapi/api/read` and used to
|
||||
schedule the queries that have `scheduled_queries` in their JSON metadata. For schedulers other than
|
||||
Airflow, additional fields can be easily added to the configuration file above.
|
||||
59
docs/src/pages/docs/installation/event-logging-page.mdx
Normal file
@@ -0,0 +1,59 @@
|
||||
---
|
||||
name: Event Logging
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/event-logging
|
||||
index: 6
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Logging
|
||||
|
||||
### Event Logging
|
||||
|
||||
Superset by default logs special action events in its internal database. These logs can be accessed
|
||||
on the UI by navigating to **Security > Action Log**. You can freely customize these logs by
|
||||
implementing your own event log class.
|
||||
|
||||
Here's an example of a simple JSON-to-stdout class:
|
||||
|
||||
```python
|
||||
def log(self, user_id, action, *args, **kwargs):
|
||||
records = kwargs.get('records', list())
|
||||
dashboard_id = kwargs.get('dashboard_id')
|
||||
slice_id = kwargs.get('slice_id')
|
||||
duration_ms = kwargs.get('duration_ms')
|
||||
referrer = kwargs.get('referrer')
|
||||
|
||||
for record in records:
|
||||
log = dict(
|
||||
action=action,
|
||||
json=record,
|
||||
dashboard_id=dashboard_id,
|
||||
slice_id=slice_id,
|
||||
duration_ms=duration_ms,
|
||||
referrer=referrer,
|
||||
user_id=user_id
|
||||
)
|
||||
print(json.dumps(log))
|
||||
```
|
||||
|
||||
End by updating your config to pass in an instance of the logger you want to use:
|
||||
|
||||
```
|
||||
EVENT_LOGGER = JSONStdOutEventLogger()
|
||||
```
|
||||
|
||||
### StatsD Logging
|
||||
|
||||
Superset can be instrumented to log events to StatsD if desired. Most endpoints hit are logged as
|
||||
well as key events like query start and end in SQL Lab.
|
||||
|
||||
To setup StatsD logging, it’s a matter of configuring the logger in your `superset_config.py`.
|
||||
|
||||
```python
|
||||
from superset.stats_logger import StatsdStatsLogger
|
||||
STATS_LOGGER = StatsdStatsLogger(host='localhost', port=8125, prefix='superset')
|
||||
```
|
||||
|
||||
Note that it’s also possible to implement your own logger by deriving
|
||||
`superset.stats_logger.BaseStatsLogger`.
|
||||
89
docs/src/pages/docs/installation/index.mdx
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
name: Installing Locally Using Docker Compose
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/installing-superset-using-docker-compose
|
||||
index: 1
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Installing Superset Locally Using Docker Compose
|
||||
|
||||
The fastest way to try Superset locally is using Docker and Docker Compose on a Linux or Mac OSX
|
||||
computer. Superset does not have official support for Windows, so we have provided a VM workaround
|
||||
below.
|
||||
|
||||
### 1. Install a Docker Engine and Docker Compose
|
||||
|
||||
**Mac OSX**
|
||||
|
||||
[Install Docker for Mac](https://docs.docker.com/docker-for-mac/install/), which includes the Docker
|
||||
engine and a recent version of `docker-compose` out of the box.
|
||||
|
||||
Once you have Docker for Mac installed, open up the preferences pane for Docker, go to the
|
||||
"Resources" section and increase the allocated memory to 6GB. With only the 2GB of RAM allocated by
|
||||
default, Superset will fail to start.
|
||||
|
||||
**Linux**
|
||||
|
||||
[Install Docker on Linux](https://docs.docker.com/engine/install/) by following Docker’s
|
||||
instructions for whichever flavor of Linux suits you. Because `docker-compose` is not installed as
|
||||
part of the base Docker installation on Linux, once you have a working engine, follow the
|
||||
[docker-compose installation instructions](https://docs.docker.com/compose/install/) for Linux.
|
||||
|
||||
**Windows**
|
||||
|
||||
Superset is not officially supported on Windows unfortunately. The best option for Windows users to
|
||||
try out Superset locally is to install an Ubuntu Desktop VM via
|
||||
[VirtualBox](https://www.virtualbox.org/) and proceed with the Docker on Linux instructions inside
|
||||
of that VM. We recommend assigning at least 8GB of RAM to the virtual machine as well as
|
||||
provisioning a hard drive of at least 40GB, so that there will be enough space for both the OS and
|
||||
all of the required dependencies.
|
||||
|
||||
### 2. Clone Superset's Github repository
|
||||
|
||||
[Clone Superset's repo](https://github.com/apache/incubator-superset) in your terminal with the
|
||||
following command:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/apache/incubator-superset.git
|
||||
```
|
||||
|
||||
Once that command completes successfully, you should see a new `incubator-superset` folder in your
|
||||
current directory.
|
||||
|
||||
### 3. Launch Superset Through Docker Compose
|
||||
|
||||
Navigate to the folder you created in step 1:
|
||||
|
||||
```bash
|
||||
$ cd incubator-superset
|
||||
```
|
||||
|
||||
Then, run the following command:
|
||||
|
||||
```bash
|
||||
$ docker-compose up
|
||||
```
|
||||
|
||||
You should see a wall of logging output from the containers being launched on your machine. Once
|
||||
this output slows, you should have a running instance of Superset on your local machine!
|
||||
|
||||
### 4. Login to Superset
|
||||
|
||||
Your local Superset instance also includes a Postgres server to store your data and is already
|
||||
pre-loaded with some example datasets that ship with Superset. You can access Superset now via your
|
||||
web browser by visiting `http://localhost:8088`. Note that many browsers now default to `https` - if
|
||||
yours is one of them, please make sure it uses `http`.
|
||||
|
||||
Log in with the default username and password:
|
||||
|
||||
```bash
|
||||
username: admin
|
||||
```
|
||||
|
||||
```bash
|
||||
password: admin
|
||||
```
|
||||
|
||||
Congrats! You have successfully installed Superset! Click 'Next' to learn how to connect a database
|
||||
driver.
|
||||
133
docs/src/pages/docs/installation/installing_scratch.mdx
Normal file
@@ -0,0 +1,133 @@
|
||||
---
|
||||
name: Installing From Scratch
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/installing-superset-from-scratch
|
||||
index: 2
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Installing Superset from Scratch
|
||||
|
||||
### OS Dependencies
|
||||
|
||||
Superset stores database connection information in its metadata database. For that purpose, we use
|
||||
the cryptography Python library to encrypt connection passwords. Unfortunately, this library has OS
|
||||
level dependencies.
|
||||
|
||||
**Debian and Ubuntu**
|
||||
|
||||
The following command will ensure that the required dependencies are installed:
|
||||
|
||||
```
|
||||
sudo apt-get install build-essential libssl-dev libffi-dev python-dev python-pip libsasl2-dev libldap2-dev
|
||||
```
|
||||
|
||||
In Ubuntu 18.04, if you have Python 3.6 installed alongside Python 2.7 (which is the default on Ubuntu
|
||||
18.04), run this command also:
|
||||
|
||||
```
|
||||
sudo apt-get install build-essential libssl-dev libffi-dev python3.6-dev python-pip libsasl2-dev libldap2-dev
|
||||
```
|
||||
|
||||
**Fedora and RHEL-derivative Linux distributions**
|
||||
|
||||
Install the following packages using the `yum` package manager:
|
||||
|
||||
```
|
||||
sudo yum install gcc gcc-c++ libffi-devel python-devel python-pip python-wheel openssl-devel cyrus-sasl-devel openldap-devel
|
||||
```
|
||||
|
||||
**Mac OS X**
|
||||
|
||||
If you're not on the latest version of OS X, we recommend upgrading because we've found that many
|
||||
issues people have run into are linked to older versions of Mac OS X. After updating, install the
|
||||
latest version of XCode command line tools:
|
||||
|
||||
```
|
||||
xcode-select --install
|
||||
```
|
||||
|
||||
We don't recommend using the system installed Python. Instead, first install the
|
||||
[homebrew](https://brew.sh/) manager and then run the following commands:
|
||||
|
||||
```
|
||||
brew install pkg-config libffi openssl python
|
||||
|
||||
env LDFLAGS="-L$(brew --prefix openssl)/lib" CFLAGS="-I$(brew --prefix openssl)/include" pip install cryptography==2.4.2
|
||||
```
|
||||
|
||||
Let's also make sure we have the latest version of `pip` and `setuptools`:
|
||||
|
||||
```
|
||||
pip install --upgrade setuptools pip
|
||||
```
|
||||
|
||||
### Python Virtual Environment
|
||||
|
||||
We highly recommend installing Superset inside of a virtual environment. Python 3 ships with
|
||||
`virtualenv` out of the box but you can install it using:
|
||||
|
||||
```
|
||||
pip install virtualenv
|
||||
```
|
||||
|
||||
You can create and activate a virtual environment using:
|
||||
|
||||
```
|
||||
# virtualenv is shipped in Python 3.6+ as venv instead of pyvenv.
|
||||
# See https://docs.python.org/3.6/library/venv.html
|
||||
python3 -m venv venv
|
||||
. venv/bin/activate
|
||||
```
|
||||
|
||||
Once you activated your virtual environment, all of the Python packages you install or uninstall
|
||||
will be confined to this environment. You can exit the environment by running `deactivate` on the
|
||||
command line.
|
||||
|
||||
### Installing and Initializing Superset
|
||||
|
||||
First, start by installing `apache-superset`:
|
||||
|
||||
```
|
||||
pip install apache-superset
|
||||
```
|
||||
|
||||
Then, you need to initialize the database:
|
||||
|
||||
```
|
||||
superset db upgrade
|
||||
```
|
||||
|
||||
Finish installing by running through the following commands:
|
||||
|
||||
```
|
||||
# Create an admin user (you will be prompted to set a username, first and last name before setting a password)
|
||||
$ export FLASK_APP=superset
|
||||
superset fab create-admin
|
||||
|
||||
# Load some data to play with
|
||||
superset load_examples
|
||||
|
||||
# Create default roles and permissions
|
||||
superset init
|
||||
|
||||
# To start a development web server on port 8088, use -p to bind to another port
|
||||
superset run -p 8088 --with-threads --reload --debugger
|
||||
```
|
||||
|
||||
If everything worked, you should be able to navigate to `hostname:port` in your browser (e.g.
|
||||
locally by default at `localhost:8088`) and login using the username and password you created.
|
||||
|
||||
### Installing Superset with Helm in Kubernetes
|
||||
|
||||
You can install Superset into Kubernetes with [Helm](https://helm.sh/). The chart is located in
|
||||
`install/helm`.
|
||||
|
||||
To install Superset in Kubernetes, run:
|
||||
|
||||
```
|
||||
helm upgrade --install superset ./install/helm/superset
|
||||
```
|
||||
|
||||
Note that the above command will install Superset into `default` namespace of your Kubernetes
|
||||
cluster.
|
||||
45
docs/src/pages/docs/installation/networking_settings.mdx
Normal file
@@ -0,0 +1,45 @@
|
||||
---
|
||||
name: Additional Networking Settings
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/networking-settings
|
||||
index: 4
|
||||
version: 1
|
||||
---
|
||||
|
||||
## Additional Networking Settings
|
||||
|
||||
### CORS
|
||||
|
||||
To configure CORS, or cross-origin resource sharing, the following dependency must be installed:
|
||||
|
||||
```python
|
||||
pip install apache-superset[cors]
|
||||
```
|
||||
|
||||
The following keys in `superset_config.py` can be specified to configure CORS:
|
||||
|
||||
- `ENABLE_CORS`: Must be set to `True` in order to enable CORS
|
||||
- `CORS_OPTIONS`: options passed to Flask-CORS
|
||||
([documentation](https://flask-cors.corydolphin.com/en/latest/api.html#extension))
|
||||
|
||||
### Domain Sharding
|
||||
|
||||
Chrome allows up to 6 open connections per domain at a time. When there are more than 6 slices in
|
||||
dashboard, fetch requests are often queued up and wait for the next available socket.
|
||||
[PR 5039](https://github.com/apache/incubator-superset/pull/5039) adds domain sharding to Superset,
|
||||
and this feature will be enabled by configuration only (by default Superset doesn’t allow
|
||||
cross-domain request).
|
||||
|
||||
Add the following setting in your `superset_config.py` file:
|
||||
|
||||
- `SUPERSET_WEBSERVER_DOMAINS`: list of allowed hostnames for the domain sharding feature.
|
||||
|
||||
### Middleware
|
||||
|
||||
Superset allows you to add your own middleware. To add your own middleware, update the
|
||||
`ADDITIONAL_MIDDLEWARE` key in your `superset_config.py`. `ADDITIONAL_MIDDLEWARE` should be a list
|
||||
of your additional middleware classes.
|
||||
|
||||
For example, to use `AUTH_REMOTE_USER` from behind a proxy server like nginx, you have to add a
|
||||
simple middleware class to add the value of `HTTP_X_PROXY_REMOTE_USER` (or any other custom header
|
||||
from the proxy) to Gunicorn’s `REMOTE_USER` environment variable:
|
||||
89
docs/src/pages/docs/installation/sql_templating.mdx
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
name: SQL Templating
|
||||
menu: Installation and Configuration
|
||||
route: /docs/installation/sql-templating
|
||||
index: 10
|
||||
version: 1
|
||||
---
|
||||
|
||||
## SQL Templating
|
||||
|
||||
### Jinja Templates
|
||||
|
||||
SQL Lab supports [Jinja templating](https://jinja.palletsprojects.com/en/2.11.x/) in queries. You'll
|
||||
need to overload the default Jinja context in your environment by defining the
|
||||
JINJA_CONTEXT_ADDONS in your superset configuration (`superset_config.py`). Objects referenced in
|
||||
this dictionary are made available for users to use in their SQL code.
|
||||
|
||||
```python
|
||||
JINJA_CONTEXT_ADDONS = {
|
||||
'my_crazy_macro': lambda x: x*2,
|
||||
}
|
||||
```
|
||||
|
||||
Besides default Jinja templating, SQL lab also supports self-defined template processor by setting
|
||||
the `CUSTOM_TEMPLATE_PROCESSORS` in your superset configuration. The values in this dictionary
|
||||
overwrite the default Jinja template processors of the specified database engine. The example below
|
||||
configures a custom presto template processor which implements its own logic of processing macro
|
||||
template with regex parsing. It uses the `$` style macro instead of `{{ }}` style in Jinja
|
||||
templating.
|
||||
|
||||
By configuring it with `CUSTOM_TEMPLATE_PROCESSORS`, a SQL template on a presto database is
|
||||
processed by the custom one rather than the default one.
|
||||
|
||||
```python
|
||||
def DATE(
|
||||
ts: datetime, day_offset: SupportsInt = 0, hour_offset: SupportsInt = 0
|
||||
) -> str:
|
||||
"""Current day as a string."""
|
||||
day_offset, hour_offset = int(day_offset), int(hour_offset)
|
||||
offset_day = (ts + timedelta(days=day_offset, hours=hour_offset)).date()
|
||||
return str(offset_day)
|
||||
|
||||
class CustomPrestoTemplateProcessor(PrestoTemplateProcessor):
|
||||
"""A custom presto template processor."""
|
||||
|
||||
engine = "presto"
|
||||
|
||||
def process_template(self, sql: str, **kwargs) -> str:
|
||||
"""Processes a sql template with $ style macro using regex."""
|
||||
# Add custom macros functions.
|
||||
macros = {
|
||||
"DATE": partial(DATE, datetime.utcnow())
|
||||
} # type: Dict[str, Any]
|
||||
# Update with macros defined in context and kwargs.
|
||||
macros.update(self.context)
|
||||
macros.update(kwargs)
|
||||
|
||||
def replacer(match):
|
||||
"""Expand $ style macros with corresponding function calls."""
|
||||
macro_name, args_str = match.groups()
|
||||
args = [a.strip() for a in args_str.split(",")]
|
||||
if args == [""]:
|
||||
args = []
|
||||
f = macros[macro_name[1:]]
|
||||
return f(*args)
|
||||
|
||||
macro_names = ["$" + name for name in macros.keys()]
|
||||
pattern = r"(%s)\s*\(([^()]*)\)" % "|".join(map(re.escape, macro_names))
|
||||
return re.sub(pattern, replacer, sql)
|
||||
|
||||
CUSTOM_TEMPLATE_PROCESSORS = {
|
||||
CustomPrestoTemplateProcessor.engine: CustomPrestoTemplateProcessor
|
||||
}
|
||||
```
|
||||
|
||||
SQL Lab also includes a live query validation feature with pluggable backends. You can configure
|
||||
which validation implementation is used with which database engine by adding a block like the
|
||||
following to your configuration file:
|
||||
|
||||
```python
|
||||
FEATURE_FLAGS = {
|
||||
'SQL_VALIDATORS_BY_ENGINE': {
|
||||
'presto': 'PrestoDBSQLValidator',
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The available validators and names can be found in
|
||||
[sql_validators](https://github.com/apache/incubator-superset/tree/master/superset/sql_validators).
|
||||