diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..b9e59549e --- /dev/null +++ b/.gitignore @@ -0,0 +1,69 @@ +# Compiled source # +################### +*.com +*.class +*.dll +*.exe +*.o +*.so + +# Packages # +############ +# it's better to unpack these files and commit the raw source +# git has its own built-in compression methods +*.7z +*.dmg +*.gz +*.iso +*.jar +*.rar +*.tar +*.zip + +# Logs and databases # +###################### +*.log + +# OS generated files # +###################### +.DS_Store* +ehthumbs.db +Icon? +Thumbs.db + +# Editor Files # +################ +*~ +*.swp + +# Gradle Files # +################ +.gradle +.m2 +gradle + +# Build output directories +/target +*/target +/build +*/build + +# IntelliJ specific files/directories +out +.idea +*.ipr +*.iws +*.iml +atlassian-ide-plugin.xml + +# Eclipse specific files/directories +.classpath +.project +.settings +.metadata + +# NetBeans specific files/directories +.nbattrs + +# Data created by Elasticsearch tests +metacat-main/data/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..7f8ced0d1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2012 Netflix, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 000000000..c05bef049 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ + +Metacat -- TODO diff --git a/build.gradle b/build.gradle new file mode 100644 index 000000000..ed4b324e3 --- /dev/null +++ b/build.gradle @@ -0,0 +1,69 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +buildscript { + repositories { + jcenter() + } +} + +plugins { + id 'nebula.netflixoss' version '3.2.3' +} + +ext.githubProjectName = rootProject.name + +allprojects { + repositories { + jcenter() + } +} + +subprojects { + apply plugin: 'nebula.netflixoss' + apply plugin: 'nebula.source-jar' + apply plugin: 'java' + apply plugin: 'groovy' + apply plugin: 'checkstyle' + apply plugin: 'findbugs' + apply plugin: 'pmd' + + group = "com.netflix.${githubProjectName}" + + sourceCompatibility = 1.8 + targetCompatibility = 1.8 + + dependencies { + testCompile 'org.spockframework:spock-core:1.0-groovy-2.4' + testCompile 'org.spockframework:spock-guice:1.0-groovy-2.4' + testCompile 'org.codehaus.gpars:gpars:1.2.1' + testCompile 'org.objenesis:objenesis:2.2' // Required by spock to mock classes + } + + checkstyle { + toolVersion = '6.11' + configFile = new File(project.parent.projectDir, "codequality/checkstyle/checkstyle.xml") + } + + findbugs { + ignoreFailures = true + excludeFilter = new File(project.parent.projectDir, "codequality/findbugs/excludeFilter.xml") + } + + tasks.withType(Pmd) { + reports.html.enabled true + } +} diff --git a/codequality/HEADER b/codequality/HEADER new file mode 100644 index 000000000..3102e4b44 --- /dev/null +++ b/codequality/HEADER @@ -0,0 +1,13 @@ +Copyright ${year} Netflix, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/codequality/checkstyle.xml b/codequality/checkstyle.xml new file mode 100644 index 000000000..ca1412bb7 --- /dev/null +++ b/codequality/checkstyle.xml @@ -0,0 +1,190 @@ +<!-- ~190 lines of Checkstyle rule configuration; the XML content was lost in extraction and is not reproduced here --> diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 000000000..4a8070da5 --- /dev/null +++ b/gradle.properties @@ -0,0 +1,24 @@ +# +# Copyright 2014 Netflix, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
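+# +# Shared third-party library versions, referenced from the module build scripts via Gradle property interpolation (e.g. "org.slf4j:slf4j-api:${slf4j_version}" in metacat-client/build.gradle).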
+# +airlift_version=0.116 +commons_dbcp_version=2.1 +guava_version=18.0 +guice_version=4.0-beta5 +jackson_version=2.5.4 +jersey_version=2.19 +presto_version=0.118 +slf4j_version=1.7.12 +swagger_version=1.3.12 diff --git a/gradlew b/gradlew new file mode 100755 index 000000000..9d82f7891 --- /dev/null +++ b/gradlew @@ -0,0 +1,160 @@ +#!/usr/bin/env bash + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn ( ) { + echo "$*" +} + +die ( ) { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; +esac + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules +function splitJvmOpts() { + JVM_OPTS=("$@") +} +eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS +JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" + +exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 000000000..aec99730b --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,90 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. 
+ +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args +if "%@eval[2+2]" == "4" goto 4NT_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* +goto execute + +:4NT_args +@rem Get arguments from the 4NT Shell from JP Software +set CMD_LINE_ARGS=%$ + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/metacat-client/build.gradle b/metacat-client/build.gradle new file mode 100644 index 000000000..fc1345cbc --- /dev/null +++ b/metacat-client/build.gradle @@ -0,0 +1,24 @@ +apply plugin: 'java' + +compileJava { + sourceCompatibility = '1.7' + targetCompatibility = '1.7' +} + +repositories { + mavenCentral() +} + +dependencies { + compile project(':metacat-common') + + compile 'com.netflix.feign:feign-core:8.6.0' + compile 'com.netflix.feign:feign-jaxrs:8.6.0' + compile 'com.netflix.feign:feign-slf4j:8.6.0' + compile 'com.fasterxml.jackson.datatype:jackson-datatype-guava:latest.release' + compile 'com.fasterxml.jackson.module:jackson-module-jaxb-annotations:latest.release' + compile "org.slf4j:slf4j-api:${slf4j_version}" + compile 'org.glassfish.jersey.core:jersey-server:2.15' + + testCompile project(':metacat-common').sourceSets.test.output +} diff --git a/metacat-client/src/main/java/com/netflix/metacat/client/Client.java b/metacat-client/src/main/java/com/netflix/metacat/client/Client.java new file mode 100644 index 000000000..85f54928f --- /dev/null +++ b/metacat-client/src/main/java/com/netflix/metacat/client/Client.java @@ -0,0 +1,200 @@ +package com.netflix.metacat.client; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.datatype.guava.GuavaModule; +import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule; +import com.netflix.metacat.client.module.JacksonDecoder; +import com.netflix.metacat.client.module.JacksonEncoder; +import com.netflix.metacat.client.module.MetacatErrorDecoder; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.api.MetacatV1; +import com.netflix.metacat.common.api.MetadataV1; +import com.netflix.metacat.common.api.PartitionV1; +import com.netflix.metacat.common.json.MetacatJsonLocator; +import feign.Feign; +import feign.Request; +import feign.RequestInterceptor; +import feign.RequestTemplate; +import feign.Retryer; +import feign.jaxrs.JAXRSContract; +import feign.slf4j.Slf4jLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import
javax.annotation.Nonnull; +import java.util.concurrent.TimeUnit; + +import static com.google.common.base.Preconditions.checkArgument; + +/** + * Client to communicate with Metacat. This version depends on the Feign library. + */ +public class Client { + private static final Logger log = LoggerFactory.getLogger(Client.class); + private final MetacatV1 api; + private final Feign.Builder feignBuilder; + private final String host; + private final PartitionV1 partitionApi; + private final MetadataV1 metadataApi; + + private Client( + @Nonnull String host, + @Nonnull feign.Logger.Level logLevel, + @Nonnull RequestInterceptor requestInterceptor, + @Nonnull Retryer retryer, + @Nonnull Request.Options options + ) { + ObjectMapper mapper = MetacatJsonLocator.INSTANCE + .getPrettyObjectMapper() + .copy() + .registerModule(new GuavaModule()) + .registerModule(new JaxbAnnotationModule()); + + log.info("Connecting to {}", host); + this.host = host; + + feignBuilder = Feign.builder() + .logger(new Slf4jLogger()) + .logLevel(logLevel) + .contract(new JAXRSContract()) + .encoder(new JacksonEncoder(mapper)) + .decoder(new JacksonDecoder(mapper)) + .errorDecoder(new MetacatErrorDecoder()) + .requestInterceptor(requestInterceptor) + .retryer(retryer) + .options(options); + + api = getApiClient(MetacatV1.class); + partitionApi = getApiClient(PartitionV1.class); + metadataApi = getApiClient(MetadataV1.class); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private String host; + private String userName; + private String clientAppName; + private String jobId; + private String dataTypeContext; + private feign.Logger.Level logLevel; + private Retryer retryer; + private RequestInterceptor requestInterceptor; + private Request.Options requestOptions; + + public Builder withLogLevel(feign.Logger.Level logLevel) { + this.logLevel = logLevel; + return this; + } + + public Builder withHost(String host) { + this.host = host; + return this; + } + + public Builder withRetryer(Retryer retryer) { + this.retryer = retryer; + return this; + } + + public Builder withUserName(String userName) { + this.userName = userName; + return this; + } + + public Builder withClientAppName(String appName) { + this.clientAppName = appName; + return this; + } + + public Builder withJobId(String jobId) { + this.jobId = jobId; + return this; + } + + public Builder withDataTypeContext(String dataTypeContext) { + this.dataTypeContext = dataTypeContext; + return this; + } + + public Builder withRequestInterceptor(RequestInterceptor requestInterceptor) { + this.requestInterceptor = requestInterceptor; + return this; + } + + + public Builder withRequestOptions(Request.Options requestOptions) { + this.requestOptions = requestOptions; + return this; + } + + public Client build() { + checkArgument(userName != null, "User name cannot be null"); + checkArgument(clientAppName != null, "Client application name cannot be null"); + if(host == null){ + host = System.getProperty("netflix.metacat.host", System.getenv("NETFLIX_METACAT_HOST")); + } + checkArgument(host != null, "Host cannot be null"); + if( retryer == null){ + retryer = new Retryer.Default(TimeUnit.MINUTES.toMillis(30), TimeUnit.MINUTES.toMillis(30), 0); + } + RequestInterceptor interceptor = new RequestInterceptor() { + @Override + public void apply(RequestTemplate template) { + if( requestInterceptor != null) { + requestInterceptor.apply(template); + } + template.header(MetacatContext.HEADER_KEY_USER_NAME, userName); + 
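+ // Propagate the remaining caller-context headers (client app name, job id, data type context) so the server can attribute each request.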
template.header(MetacatContext.HEADER_KEY_CLIENT_APP_NAME, clientAppName); + template.header(MetacatContext.HEADER_KEY_JOB_ID, jobId); + template.header(MetacatContext.HEADER_KEY_DATA_TYPE_CONTEXT, dataTypeContext); + } + }; + if( requestOptions == null){ + requestOptions = new Request.Options((int)TimeUnit.MINUTES.toMillis(10), (int)TimeUnit.MINUTES.toMillis(30)); + } + if( logLevel == null){ + logLevel = feign.Logger.Level.NONE; + } + return new Client( host, logLevel, interceptor, retryer, requestOptions); + } + } + + /** + * Returns an API instance that conforms to the given API type and can communicate with the Metacat server + * + * @param apiType A JAX-RS annotated Metacat interface + * @return An instance that implements the given interface and is wired up to communicate with the Metacat server. + */ + public <T> T getApiClient(@Nonnull Class<T> apiType) { + checkArgument(apiType.isInterface(), "apiType must be an interface"); + + return feignBuilder.target(apiType, host); + } + + /** + * Returns an API instance that can be used to interact with the Metacat server + * @return An API instance conforming to the MetacatV1 interface + */ + public MetacatV1 getApi(){ + return api; + } + + /** + * Returns an API instance that can be used to interact with the Metacat server for partitions + * @return An API instance conforming to the PartitionV1 interface + */ + public PartitionV1 getPartitionApi(){ + return partitionApi; + } + + /** + * Returns an API instance that can be used to interact with the Metacat server for user metadata only + * @return An API instance conforming to the MetadataV1 interface + */ + public MetadataV1 getMetadataApi(){ + return metadataApi; + } +} diff --git a/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonDecoder.java b/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonDecoder.java new file mode 100644 index 000000000..faa528a6a --- /dev/null +++ b/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonDecoder.java @@ -0,0 +1,35 @@ +package com.netflix.metacat.client.module; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.RuntimeJsonMappingException; +import feign.Response; +import feign.codec.Decoder; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Type; + +public class JacksonDecoder implements Decoder { + private final ObjectMapper mapper; + + public JacksonDecoder(ObjectMapper mapper) { + this.mapper = mapper; + } + + @Override + public Object decode(Response response, Type type) throws IOException { + if (response.body() == null || response.status() == 204 + || (response.body().length() != null && response.body().length() == 0)) { + return null; + } + InputStream inputStream = response.body().asInputStream(); + try { + return mapper.readValue(inputStream, mapper.constructType(type)); + } catch (RuntimeJsonMappingException e) { + if (e.getCause() != null && e.getCause() instanceof IOException) { + throw IOException.class.cast(e.getCause()); + } + throw e; + } + } +} diff --git a/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonEncoder.java b/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonEncoder.java new file mode 100644 index 000000000..6046f29dc --- /dev/null +++ b/metacat-client/src/main/java/com/netflix/metacat/client/module/JacksonEncoder.java @@ -0,0 +1,35 @@ +package com.netflix.metacat.client.module; + +import com.fasterxml.jackson.core.JsonProcessingException; +import
com.fasterxml.jackson.databind.ObjectMapper; +import feign.RequestTemplate; +import feign.codec.EncodeException; +import feign.codec.Encoder; + +import java.lang.reflect.Type; + +public class JacksonEncoder implements Encoder { + private final ObjectMapper mapper; + + public JacksonEncoder(ObjectMapper mapper) { + this.mapper = mapper; + } + + /** + * Converts objects to an appropriate representation in the template. + * + * @param object what to encode as the request body. + * @param bodyType the type the object should be encoded as. {@code Map<String, ?>}, if form + * encoding. + * @param template the request template to populate. + * @throws feign.codec.EncodeException when encoding failed due to a checked exception. + */ + @Override + public void encode(Object object, Type bodyType, RequestTemplate template) throws EncodeException { + try { + template.body(mapper.writeValueAsString(object)); + } catch (JsonProcessingException e) { + throw new EncodeException(e.getMessage(), e); + } + } +} diff --git a/metacat-client/src/main/java/com/netflix/metacat/client/module/MetacatErrorDecoder.java b/metacat-client/src/main/java/com/netflix/metacat/client/module/MetacatErrorDecoder.java new file mode 100644 index 000000000..10ba28967 --- /dev/null +++ b/metacat-client/src/main/java/com/netflix/metacat/client/module/MetacatErrorDecoder.java @@ -0,0 +1,56 @@ +package com.netflix.metacat.client.module; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.exception.MetacatAlreadyExistsException; +import com.netflix.metacat.common.exception.MetacatBadRequestException; +import com.netflix.metacat.common.exception.MetacatException; +import com.netflix.metacat.common.exception.MetacatNotFoundException; +import com.netflix.metacat.common.exception.MetacatNotSupportedException; +import com.netflix.metacat.common.json.MetacatJson; +import com.netflix.metacat.common.json.MetacatJsonException; +import com.netflix.metacat.common.json.MetacatJsonLocator; +import feign.Response; +import feign.RetryableException; +import feign.Util; + +import java.io.IOException; + +import static javax.ws.rs.core.Response.Status; + +/** + * Module that provides an error decoder, used to parse error responses + */ +public class MetacatErrorDecoder extends feign.codec.ErrorDecoder.Default { + private static final MetacatJson metacatJson = MetacatJsonLocator.INSTANCE; + @Override + public Exception decode(String methodKey, Response response){ + try { + Status status = Status.fromStatusCode(response.status()); + String message = ""; + if (response.body() != null){ + message = Util.toString(response.body().asReader()); + try { + ObjectNode body = metacatJson.parseJsonObject(message); + message = body.path("error").asText(); + } catch (MetacatJsonException ignored) {} + } + switch (status) { + case UNSUPPORTED_MEDIA_TYPE: + return new MetacatNotSupportedException(message); + case BAD_REQUEST: + return new MetacatBadRequestException(message); + case NOT_FOUND: + return new MetacatNotFoundException(message); + case CONFLICT: + return new MetacatAlreadyExistsException(message); + case INTERNAL_SERVER_ERROR: + case SERVICE_UNAVAILABLE: + return new RetryableException(message, null); + default: + return new MetacatException(message, Status.INTERNAL_SERVER_ERROR, null); + } + } catch (IOException e) { + return super.decode(methodKey, response); + } + } +} diff --git a/metacat-common-server/build.gradle b/metacat-common-server/build.gradle new file mode 100644 index 000000000..fe8d13219 --- /dev/null +++
b/metacat-common-server/build.gradle @@ -0,0 +1,11 @@ +dependencies { + compile project(':metacat-common') + compile "com.facebook.presto:presto-spi:${presto_version}" + compile 'org.apache.tomcat:tomcat-jdbc:8.0.22' + compile "com.google.guava:guava:${guava_version}" + compile "com.google.inject:guice:${guice_version}" + compile 'com.netflix.archaius:archaius-core:0.6.5' + compile 'com.netflix.servo:servo-core:0.8.3' + compile "io.airlift:configuration:${airlift_version}" + compile "org.slf4j:slf4j-api:${slf4j_version}" +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/exception/CatalogNotFoundException.java b/metacat-common-server/src/main/java/com/facebook/presto/exception/CatalogNotFoundException.java new file mode 100644 index 000000000..2f7109d8f --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/exception/CatalogNotFoundException.java @@ -0,0 +1,18 @@ +package com.facebook.presto.exception; + +import com.facebook.presto.spi.NotFoundException; + +/** + * Created by amajumdar on 4/30/15. + */ +public class CatalogNotFoundException extends NotFoundException{ + private final String catalogName; + public CatalogNotFoundException(String catalogName) { + this(catalogName, null); + } + + public CatalogNotFoundException(String catalogName, Throwable cause) { + super(String.format("Catalog %s not found.", catalogName), cause); + this.catalogName = catalogName; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/exception/InvalidMetaException.java b/metacat-common-server/src/main/java/com/facebook/presto/exception/InvalidMetaException.java new file mode 100644 index 000000000..9fe891d0a --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/exception/InvalidMetaException.java @@ -0,0 +1,34 @@ +package com.facebook.presto.exception; + +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.StandardErrorCode; + +/** + * Created by amajumdar on 5/11/15. + */ +public class InvalidMetaException extends PrestoException { + private SchemaTableName tableName; + private String partitionId; + + public InvalidMetaException(SchemaTableName tableName, Throwable cause) { + super(StandardErrorCode.USER_ERROR + , String.format("Invalid metadata for %s.", tableName) + , cause); + this.tableName = tableName; + } + + public InvalidMetaException(SchemaTableName tableName, String partitionId, Throwable cause) { + super(StandardErrorCode.USER_ERROR + , String.format("Invalid metadata for %s for partition %s.", tableName, partitionId) + , cause); + this.tableName = tableName; + this.partitionId = partitionId; + } + + public InvalidMetaException(String message, Throwable cause) { + super(StandardErrorCode.USER_ERROR + , message + , cause); + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/exception/PartitionAlreadyExistsException.java b/metacat-common-server/src/main/java/com/facebook/presto/exception/PartitionAlreadyExistsException.java new file mode 100644 index 000000000..0c0c38369 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/exception/PartitionAlreadyExistsException.java @@ -0,0 +1,22 @@ +package com.facebook.presto.exception; + +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.StandardErrorCode; + +/** + * Created by amajumdar on 4/30/15. 
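+ * <p> + * Illustrative sketch (catalog, table and partition names here are hypothetical): + * <pre> + * throw new PartitionAlreadyExistsException( + * new SchemaTableName("prod", "events"), "dateint=20150101"); + * </pre>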
+ */ +public class PartitionAlreadyExistsException extends PrestoException{ + private final SchemaTableName tableName; + private final String partitionId; + public PartitionAlreadyExistsException(SchemaTableName tableName, String partitionId) { + this(tableName, partitionId, null); + } + + public PartitionAlreadyExistsException(SchemaTableName tableName, String partitionId, Throwable cause) { + super(StandardErrorCode.ALREADY_EXISTS, String.format("Partition %s already exists for table %s", partitionId==null?"": partitionId, tableName), cause); + this.tableName = tableName; + this.partitionId = partitionId; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/exception/PartitionNotFoundException.java b/metacat-common-server/src/main/java/com/facebook/presto/exception/PartitionNotFoundException.java new file mode 100644 index 000000000..d4917bf01 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/exception/PartitionNotFoundException.java @@ -0,0 +1,21 @@ +package com.facebook.presto.exception; + +import com.facebook.presto.spi.NotFoundException; +import com.facebook.presto.spi.SchemaTableName; + +/** + * Created by amajumdar on 4/30/15. + */ +public class PartitionNotFoundException extends NotFoundException{ + private final SchemaTableName tableName; + private final String partitionId; + public PartitionNotFoundException(SchemaTableName tableName, String partitionId) { + this(tableName, partitionId, null); + } + + public PartitionNotFoundException(SchemaTableName tableName, String partitionId, Throwable cause) { + super(String.format("Partition %s not found for table %s", partitionId==null?"": partitionId, tableName), cause); + this.tableName = tableName; + this.partitionId = partitionId; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/exception/SchemaAlreadyExistsException.java b/metacat-common-server/src/main/java/com/facebook/presto/exception/SchemaAlreadyExistsException.java new file mode 100644 index 000000000..1e0289012 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/exception/SchemaAlreadyExistsException.java @@ -0,0 +1,19 @@ +package com.facebook.presto.exception; + +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.StandardErrorCode; + +/** + * Created by amajumdar on 4/30/15.
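+ * <p> + * Illustrative sketch (the schema name is hypothetical): + * <pre> + * throw new SchemaAlreadyExistsException("prod"); + * </pre>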
+ */ +public class SchemaAlreadyExistsException extends PrestoException{ + private final String schemaName; + public SchemaAlreadyExistsException(String schemaName) { + this(schemaName, null); + } + + public SchemaAlreadyExistsException(String schemaName, Throwable cause) { + super(StandardErrorCode.ALREADY_EXISTS, String.format("Schema %s already exists.", schemaName), cause); + this.schemaName = schemaName; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/plugin/ColumnDetailHandle.java b/metacat-common-server/src/main/java/com/facebook/presto/plugin/ColumnDetailHandle.java new file mode 100644 index 000000000..56dcf77e6 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/plugin/ColumnDetailHandle.java @@ -0,0 +1,167 @@ +package com.facebook.presto.plugin; + +import com.facebook.presto.spi.ColumnDetailMetadata; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.type.Type; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +import static com.google.common.base.MoreObjects.toStringHelper; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 9/30/15. + */ +public class ColumnDetailHandle implements ColumnHandle { + private final String connectorId; + private final String columnName; + private final Type columnType; + private final Boolean isPartitionKey; + private final String comment; + private final String sourceType; + private final Integer size; + private final Boolean isNullable; + private final String defaultValue; + private final Boolean isSortKey; + private final Boolean isIndexKey; + @JsonCreator + public ColumnDetailHandle( + @JsonProperty("connectorId") + String connectorId, + @JsonProperty("columnName") + String columnName, + @JsonProperty("columnType") + Type columnType, + @JsonProperty("isPartitionKey") + Boolean isPartitionKey, + @JsonProperty("comment") + String comment, + @JsonProperty("sourceType") + String sourceType, + @JsonProperty("size") + Integer size, + @JsonProperty("isNullable") + Boolean isNullable, + @JsonProperty("defaultValue") + String defaultValue, + @JsonProperty("isSortKey") + Boolean isSortKey, + @JsonProperty("isIndexKey") + Boolean isIndexKey) { + this.connectorId = checkNotNull(connectorId, "connectorId is null"); + this.columnName = checkNotNull(columnName, "columnName is null"); + this.columnType = checkNotNull(columnType, "columnType is null"); + this.isPartitionKey = isPartitionKey; + this.comment = comment; + this.sourceType = sourceType; + this.size = size; + this.isNullable = isNullable; + this.defaultValue = defaultValue; + this.isSortKey = isSortKey; + this.isIndexKey = isIndexKey; + } + + @JsonProperty + public String getConnectorId() + { + return connectorId; + } + + @JsonProperty + public String getColumnName() + { + return columnName; + } + + @JsonProperty + public Type getColumnType() + { + return columnType; + } + + @JsonProperty + public Boolean getIsPartitionKey() { + return isPartitionKey; + } + + @JsonProperty + public String getComment() { + return comment; + } + + @JsonProperty + public String getSourceType() { + return sourceType; + } + + @JsonProperty + public Integer getSize() { + return size; + } + + @JsonProperty + public Boolean getIsNullable() { + return isNullable; + } + + @JsonProperty + public String getDefaultValue() { + return defaultValue; + } + + @JsonProperty
public Boolean getIsSortKey() { + return isSortKey; + } + + @JsonProperty + public Boolean getIsIndexKey() { + return isIndexKey; + } + + public ColumnMetadata getColumnMetadata() + { + StringBuilder comments = new StringBuilder(comment==null?"":comment).append(" ") + .append("nullable=").append(isNullable).append(", ") + .append("columnLength=").append(size).append(", ") + .append("default=").append(defaultValue).append(", ") + .append("sortKey=").append(isSortKey).append(", ") + .append("indexKey=").append(isIndexKey); + + return new ColumnDetailMetadata(columnName, columnType, isPartitionKey==null?false:isPartitionKey, + comments.toString(), false, sourceType, size, isNullable, defaultValue, isSortKey, isIndexKey); + } + + @Override + public boolean equals(Object obj) + { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + ColumnDetailHandle o = (ColumnDetailHandle) obj; + return Objects.equals(this.connectorId, o.connectorId) && + Objects.equals(this.columnName, o.columnName); + } + + @Override + public int hashCode() + { + return Objects.hash(connectorId, columnName); + } + + @Override + public String toString() + { + return toStringHelper(this) + .add("connectorId", connectorId) + .add("columnName", columnName) + .add("columnType", columnType) + .toString(); + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/AuditInfo.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/AuditInfo.java new file mode 100644 index 000000000..caf50d33d --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/AuditInfo.java @@ -0,0 +1,53 @@ +package com.facebook.presto.spi; + +/** + * Created by amajumdar on 3/3/15. + */ +public class AuditInfo { + private String createdBy; + private String lastUpdatedBy; + private Long createdDate; + private Long lastUpdatedDate; + + public AuditInfo() { + } + + public AuditInfo(String createdBy, String lastUpdatedBy, Long createdDate, Long lastUpdatedDate) { + this.createdBy = createdBy; + this.lastUpdatedBy = lastUpdatedBy; + this.createdDate = createdDate; + this.lastUpdatedDate = lastUpdatedDate; + } + + public String getCreatedBy() { + return createdBy; + } + + public void setCreatedBy(String createdBy) { + this.createdBy = createdBy; + } + + public String getLastUpdatedBy() { + return lastUpdatedBy; + } + + public void setLastUpdatedBy(String lastUpdatedBy) { + this.lastUpdatedBy = lastUpdatedBy; + } + + public Long getCreatedDate() { + return createdDate; + } + + public void setCreatedDate(Long createdDate) { + this.createdDate = createdDate; + } + + public Long getLastUpdatedDate() { + return lastUpdatedDate; + } + + public void setLastUpdatedDate(Long lastUpdatedDate) { + this.lastUpdatedDate = lastUpdatedDate; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ColumnDetailMetadata.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ColumnDetailMetadata.java new file mode 100644 index 000000000..c55a3d2e6 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ColumnDetailMetadata.java @@ -0,0 +1,60 @@ +package com.facebook.presto.spi; + +import com.facebook.presto.spi.type.Type; + +/** + * Created by amajumdar on 9/28/15. 
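+ * <p> + * Illustrative sketch using the short constructor (the column name and source type are hypothetical; {@code BigintType.BIGINT} is the presto-spi bigint type): + * <pre> + * ColumnMetadata column = new ColumnDetailMetadata("dateint", BigintType.BIGINT, true, "int"); + * </pre>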
+ */ +public class ColumnDetailMetadata extends ColumnMetadata { + private final String sourceType; + private final Integer size; + private final Boolean isNullable; + private final String defaultValue; + private final Boolean isSortKey; + private final Boolean isIndexKey; + + public ColumnDetailMetadata(String name, Type type, boolean partitionKey, String sourceType) { + this(name , type, partitionKey, null, false, sourceType, null, null, null, null, null); + } + + public ColumnDetailMetadata(String name, Type type, boolean partitionKey, String comment, boolean hidden + , String sourceType) { + this(name , type, partitionKey, comment, hidden, sourceType, null, null, null, null, null); + } + + public ColumnDetailMetadata(String name, Type type, boolean partitionKey, String comment, boolean hidden + , String sourceType, Integer size, Boolean isNullable + , String defaultValue, Boolean isSortKey, Boolean isIndexKey) { + super(name, type, partitionKey, comment, hidden); + this.sourceType = sourceType; + this.size = size; + this.isNullable = isNullable; + this.defaultValue = defaultValue; + this.isSortKey = isSortKey; + this.isIndexKey = isIndexKey; + } + + public String getSourceType() { + return sourceType; + } + + public Boolean getIsNullable() { + return isNullable; + } + + public Integer getSize() { + return size; + } + + public String getDefaultValue() { + return defaultValue; + } + + public Boolean getIsSortKey() { + return isSortKey; + } + + public Boolean getIsIndexKey() { + return isIndexKey; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorDetailMetadata.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorDetailMetadata.java new file mode 100644 index 000000000..e0058fddf --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorDetailMetadata.java @@ -0,0 +1,61 @@ +package com.facebook.presto.spi; + +import com.google.common.collect.Lists; + +import java.util.List; + +/** + * Created by amajumdar on 1/15/15. + */ +public interface ConnectorDetailMetadata extends ConnectorMetadata { + /** + * Creates a schema with the given schemaName + * @param session connector session + * @param schema schema metadata + */ + void createSchema(ConnectorSession session, ConnectorSchemaMetadata schema); + + /** + * Updates a schema with the given schemaName + * @param session connector session + * @param schema schema metadata + */ + void updateSchema(ConnectorSession session, ConnectorSchemaMetadata schema); + + /** + * Drops a schema with the given schemaName + * @param session connector session + * @param schemaName schema name + */ + void dropSchema(ConnectorSession session, String schemaName); + + /** + * Returns the schema with the given schemaName + * @param session connector session + * @param schemaName schema name + */ + ConnectorSchemaMetadata getSchema(ConnectorSession session, String schemaName); + + /** + * Updates a table using the specified table metadata. + */ + ConnectorTableHandle alterTable(ConnectorSession session, ConnectorTableMetadata tableMetadata); + + /** + * Returns all the table names referring to the given uri + * @param uri location + * @param prefixSearch if true, we look for tables whose location starts with the given uri + * @return list of table names + */ + default List<SchemaTableName> getTableNames(String uri, boolean prefixSearch){ + return Lists.newArrayList(); + } + + /** + * Similar to listTables but this method will return the list of tables along with their metadata.
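+ * For example (illustrative), a caller can pass the names returned by {@code ConnectorMetadata#listTables} to fetch metadata for several tables in one call.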
+ * @param session connector session + * @param schemaName schema name + * @param tableNames list of table names to fetch + * @return list of table metadata + */ + List<ConnectorTableMetadata> listTableMetadatas(ConnectorSession session, String schemaName, List<SchemaTableName> tableNames); +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorPartitionDetail.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorPartitionDetail.java new file mode 100644 index 000000000..99edbb43e --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorPartitionDetail.java @@ -0,0 +1,27 @@ +package com.facebook.presto.spi; + +import java.util.Map; + +/** + * Created by amajumdar on 2/2/15. + */ +public interface ConnectorPartitionDetail extends ConnectorPartition +{ + /** + * Gets any extra properties of a partition that are relevant to a particular catalog. + * @return extra properties other than the partition key + */ + Map<String, String> getMetadata(); + + /** + * Gets the storage related information about the partition. This applies mostly in the case of unstructured data stored as files. + * @return storage information related properties + */ + StorageInfo getStorageInfo(); + + /** + * Gets the audit information like created date, last updated date etc. + * @return audit information + */ + AuditInfo getAuditInfo(); +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorPartitionDetailImpl.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorPartitionDetailImpl.java new file mode 100644 index 000000000..c3546c036 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorPartitionDetailImpl.java @@ -0,0 +1,62 @@ +package com.facebook.presto.spi; + +import java.util.Map; + +/** + * Created by amajumdar on 2/2/15.
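+ * <p> + * Illustrative sketch (the partition id and metadata values are hypothetical; {@code TupleDomain.all()} is the presto-spi factory for an unconstrained domain): + * <pre> + * ConnectorPartitionDetail detail = new ConnectorPartitionDetailImpl( + * "dateint=20150101", TupleDomain.all(), + * ImmutableMap.of("location", "s3://bucket/events/20150101")); + * </pre>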
+ */ +public class ConnectorPartitionDetailImpl implements ConnectorPartitionDetail{ + private final String partitionId; + private final TupleDomain<ColumnHandle> tupleDomain; + private final StorageInfo storageInfo; + private Map<String, String> metadata; + private final AuditInfo auditInfo; + + public ConnectorPartitionDetailImpl(String partitionId, + TupleDomain<ColumnHandle> tupleDomain, Map<String, String> metadata) { + this(partitionId, tupleDomain, null, metadata, null); + } + + public ConnectorPartitionDetailImpl(String partitionId, + TupleDomain<ColumnHandle> tupleDomain, StorageInfo storageInfo, Map<String, String> metadata) { + this(partitionId, tupleDomain, storageInfo, metadata, null); + } + + public ConnectorPartitionDetailImpl(String partitionId, + TupleDomain<ColumnHandle> tupleDomain, StorageInfo storageInfo, Map<String, String> metadata, AuditInfo auditInfo) { + this.partitionId = partitionId; + this.tupleDomain = tupleDomain; + this.storageInfo = storageInfo; + this.metadata = metadata; + this.auditInfo = auditInfo!=null?auditInfo:new AuditInfo(); + } + + @Override + public Map<String, String> getMetadata() { + return metadata; + } + + @Override + public StorageInfo getStorageInfo() { + return storageInfo; + } + + @Override + public String getPartitionId() { + return partitionId; + } + + @Override + public TupleDomain<ColumnHandle> getTupleDomain() { + return tupleDomain; + } + + @Override + public AuditInfo getAuditInfo() { + return auditInfo; + } + + public void setMetadata(Map<String, String> metadata) { + this.metadata = metadata; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorSchemaMetadata.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorSchemaMetadata.java new file mode 100644 index 000000000..ff782f9a2 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorSchemaMetadata.java @@ -0,0 +1,40 @@ +package com.facebook.presto.spi; + +import com.google.common.collect.Maps; + +import java.util.Map; + +/** + * Created by amajumdar on 3/9/16. + */ +public class ConnectorSchemaMetadata { + private String schemaName; + private String uri; + private Map<String, String> metadata; + + public ConnectorSchemaMetadata(String schemaName) { + this(schemaName, null); + } + + public ConnectorSchemaMetadata(String schemaName, String uri) { + this(schemaName, uri, Maps.newHashMap()); + } + + public ConnectorSchemaMetadata(String schemaName, String uri, Map<String, String> metadata) { + this.schemaName = schemaName; + this.uri = uri; + this.metadata = metadata; + } + + public String getSchemaName() { + return schemaName; + } + + public String getUri() { + return uri; + } + + public Map<String, String> getMetadata() { + return metadata; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorSplitDetailManager.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorSplitDetailManager.java new file mode 100644 index 000000000..445607baf --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorSplitDetailManager.java @@ -0,0 +1,52 @@ +package com.facebook.presto.spi; + +import com.google.common.collect.Lists; + +import java.util.List; + +/** + * Created by amajumdar on 2/2/15. + */ +public interface ConnectorSplitDetailManager extends ConnectorSplitManager{ + /** + * Gets the partitions based on a filter expression for the specified table. + * @param table table handle + * @param filterExpression JSP based filter expression string + * @param partitionNames filter the list that matches the given partition names. If null or empty, it will return all.
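+ * @param sort sort specification to apply to the partition list + * @param pageable pagination information (limit/offset) + * @param includePartitionDetails if true, also populate partition metadata, storage and audit info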
+     * @return filtered list of partitions
+     */
+    ConnectorPartitionResult getPartitions(ConnectorTableHandle table, String filterExpression, List<String> partitionNames, Sort sort, Pageable pageable, boolean includePartitionDetails);
+
+    /**
+     * Add/update/delete partitions for a table.
+     * @param table table handle
+     * @param partitions list of partitions
+     * @param partitionIdsForDeletes list of partition ids/names to delete
+     * @param checkIfExists if true, check whether the given partitions already exist before saving
+     * @return added/updated list of partition names
+     */
+    SavePartitionResult savePartitions(ConnectorTableHandle table, List<ConnectorPartition> partitions, List<String> partitionIdsForDeletes, boolean checkIfExists);
+
+    /**
+     * Delete partitions for a table.
+     * @param table table handle
+     * @param partitionIds list of partition names
+     */
+    void deletePartitions(ConnectorTableHandle table, List<String> partitionIds);
+
+    /**
+     * Number of partitions for the given table.
+     * @param connectorHandle table handle
+     * @return number of partitions
+     */
+    Integer getPartitionCount(ConnectorTableHandle connectorHandle);
+
+    /**
+     * Returns all the partition names referring to the given uri.
+     * @param uri location
+     * @param prefixSearch if true, look for partitions whose location starts with the given uri
+     * @return list of partition names
+     */
+    default List<String> getPartitionNames(String uri, boolean prefixSearch) {
+        return Lists.newArrayList();
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorTableDetailMetadata.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorTableDetailMetadata.java
new file mode 100644
index 000000000..9aaf9622a
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/ConnectorTableDetailMetadata.java
@@ -0,0 +1,61 @@
+package com.facebook.presto.spi;
+
+import com.google.common.collect.Maps;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Contains any extra metadata about a table. It was created initially for the serde information
+ * that exists in Hive.
+ *
+ * Created by amajumdar on 1/15/15.
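+ *
+ * A minimal construction sketch (the schema and table names and {@code columns} are hypothetical):
+ * <pre>
+ * ConnectorTableDetailMetadata tableMetadata = new ConnectorTableDetailMetadata(
+ *         new SchemaTableName("example", "mytable"), columns, Maps.newHashMap());
+ * </pre>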
+ */ +public class ConnectorTableDetailMetadata extends ConnectorTableMetadata { + private StorageInfo storageInfo; + private Map metadata; + private AuditInfo auditInfo; + public ConnectorTableDetailMetadata(SchemaTableName table, + List columns, Map metadata) { + this( table, columns, null, null, metadata, null); + } + + public ConnectorTableDetailMetadata(SchemaTableName table, + List columns, StorageInfo storageInfo, Map metadata) { + this( table, columns, null, storageInfo, metadata, null); + } + + public ConnectorTableDetailMetadata(SchemaTableName table, List columns + , String owner, StorageInfo storageInfo, Map metadata + , AuditInfo auditInfo) + { + super(table, columns, Maps.newHashMap(), owner, false); + this.storageInfo = storageInfo; + this.metadata = metadata; + this.auditInfo = auditInfo!=null?auditInfo:new AuditInfo(); + } + + public StorageInfo getStorageInfo() { + return storageInfo; + } + + public void setStorageInfo(StorageInfo storageInfo) { + this.storageInfo = storageInfo; + } + + public Map getMetadata() { + return metadata; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public AuditInfo getAuditInfo() { + return auditInfo; + } + + public void setAuditInfo(AuditInfo auditInfo) { + this.auditInfo = auditInfo; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/Pageable.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/Pageable.java new file mode 100644 index 000000000..4d7112a87 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/Pageable.java @@ -0,0 +1,38 @@ +package com.facebook.presto.spi; + +/** + * Represents the pagination information + * Created by amajumdar on 3/16/15. + */ +public class Pageable { + private Integer limit; + private Integer offset; + + public Pageable() { + } + + public Pageable(Integer limit, Integer offset) { + this.limit = limit; + this.offset = offset; + } + + public Integer getLimit() { + return limit; + } + + public void setLimit(Integer limit) { + this.limit = limit; + } + + public Integer getOffset() { + return offset==null?0:offset; + } + + public void setOffset(Integer offset) { + this.offset = offset; + } + + public boolean isPageable(){ + return limit != null; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/SavePartitionResult.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/SavePartitionResult.java new file mode 100644 index 000000000..e57a69d3b --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/SavePartitionResult.java @@ -0,0 +1,37 @@ +package com.facebook.presto.spi; + +import java.util.ArrayList; +import java.util.List; + +/** + * Created by amajumdar on 7/20/15. 
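+ *
+ * Holds the result of a partition save operation: the names of the partitions that were added
+ * and the names of the partitions that were updated.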
+ */ +public class SavePartitionResult { + List added; + List updated; + + public SavePartitionResult() { + added = new ArrayList<>(); + updated = new ArrayList<>(); + } + + public List getAdded() { + return added; + } + + public void setAdded(List added) { + if( added != null) { + this.added = added; + } + } + + public List getUpdated() { + return updated; + } + + public void setUpdated(List updated) { + if( updated != null) { + this.updated = updated; + } + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/SchemaTablePartitionName.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/SchemaTablePartitionName.java new file mode 100644 index 000000000..676323bd9 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/SchemaTablePartitionName.java @@ -0,0 +1,22 @@ +package com.facebook.presto.spi; + +/** + * Created by amajumdar on 7/13/15. + */ +public class SchemaTablePartitionName { + private final SchemaTableName tableName; + private final String partitionId; + + public SchemaTablePartitionName(SchemaTableName tableName, String partitionId) { + this.tableName = tableName; + this.partitionId = partitionId; + } + + public String getPartitionId() { + return partitionId; + } + + public SchemaTableName getTableName() { + return tableName; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/Sort.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/Sort.java new file mode 100644 index 000000000..354dfbbbf --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/Sort.java @@ -0,0 +1,37 @@ +package com.facebook.presto.spi; + +/** + * Created by amajumdar on 3/16/15. + */ +public class Sort { + private String sortBy; + private SortOrder order; + + public Sort() { + } + + public Sort(String sortBy, SortOrder order) { + this.sortBy = sortBy; + this.order = order; + } + + public String getSortBy() { + return sortBy; + } + + public void setSortBy(String sortBy) { + this.sortBy = sortBy; + } + + public SortOrder getOrder() { + return order==null?SortOrder.ASC:order; + } + + public void setOrder(SortOrder order) { + this.order = order; + } + + public boolean hasSort(){ + return sortBy != null; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/SortOrder.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/SortOrder.java new file mode 100644 index 000000000..cfa76202e --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/SortOrder.java @@ -0,0 +1,8 @@ +package com.facebook.presto.spi; + +/** + * Created by amajumdar on 3/16/15. + */ +public enum SortOrder { + ASC, DESC +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/spi/StorageInfo.java b/metacat-common-server/src/main/java/com/facebook/presto/spi/StorageInfo.java new file mode 100644 index 000000000..2f3c5c953 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/spi/StorageInfo.java @@ -0,0 +1,81 @@ +package com.facebook.presto.spi; + +import java.util.Map; + +/** + * Created by amajumdar on 3/3/15. 
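+ *
+ * Describes where and how the data of a table or partition is stored. A minimal sketch
+ * (hypothetical Hive-style values):
+ * <pre>
+ * StorageInfo info = new StorageInfo();
+ * info.setUri("s3://warehouse/example/mytable/dateint=20150303");
+ * info.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
+ * info.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
+ * </pre>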
+ */ +public class StorageInfo { + /* Location of the data */ + private String uri; + /* Input format of the file */ + private String inputFormat; + /* Output format of the file */ + private String outputFormat; + /* Serialization library */ + private String serializationLib; + /* Serialization parameters */ + private Map parameters; + private Map serdeInfoParameters; + + public StorageInfo() { + } + + public StorageInfo(String uri, String inputFormat, String outputFormat, String serializationLib, + Map parameters, Map serdeInfoParameters) { + this.uri = uri; + this.inputFormat = inputFormat; + this.outputFormat = outputFormat; + this.serializationLib = serializationLib; + this.parameters = parameters; + this.serdeInfoParameters = serdeInfoParameters; + } + + public String getInputFormat() { + return inputFormat; + } + + public void setInputFormat(String inputFormat) { + this.inputFormat = inputFormat; + } + + public String getOutputFormat() { + return outputFormat; + } + + public void setOutputFormat(String outputFormat) { + this.outputFormat = outputFormat; + } + + public String getSerializationLib() { + return serializationLib; + } + + public void setSerializationLib(String serializationLib) { + this.serializationLib = serializationLib; + } + + public Map getParameters() { + return parameters; + } + + public void setParameters(Map parameters) { + this.parameters = parameters; + } + + public Map getSerdeInfoParameters() { + return serdeInfoParameters; + } + + public void setSerdeInfoParameters(Map serdeInfoParameters) { + this.serdeInfoParameters = serdeInfoParameters; + } + + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } +} diff --git a/metacat-common-server/src/main/java/com/facebook/presto/type/FloatType.java b/metacat-common-server/src/main/java/com/facebook/presto/type/FloatType.java new file mode 100644 index 000000000..a37c27ee5 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/type/FloatType.java @@ -0,0 +1,87 @@ +package com.facebook.presto.type; + +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.block.Block; +import com.facebook.presto.spi.block.BlockBuilder; +import com.facebook.presto.spi.type.AbstractFixedWidthType; + +import static com.facebook.presto.spi.type.TypeSignature.parseTypeSignature; +import static io.airlift.slice.SizeOf.SIZE_OF_FLOAT; + +public final class FloatType extends AbstractFixedWidthType +{ + public static final FloatType FLOAT = new FloatType(); + public static final String TYPE = "float"; + + private FloatType() + { + super(parseTypeSignature(TYPE), float.class, SIZE_OF_FLOAT); + } + + @Override + public boolean isComparable() + { + return true; + } + + @Override + public boolean isOrderable() + { + return true; + } + + @Override + public Object getObjectValue(ConnectorSession session, Block block, int position) + { + if (block.isNull(position)) { + return null; + } + return block.getFloat(position, 0); + } + + @Override + public boolean equalTo(Block leftBlock, int leftPosition, Block rightBlock, int rightPosition) + { + float leftValue = leftBlock.getFloat(leftPosition, 0); + float rightValue = rightBlock.getFloat(rightPosition, 0); + return leftValue == rightValue; + } + + @Override + public int hash(Block block, int position) + { + long value = block.getLong(position, 0); + return (int) (value ^ (value >>> 32)); + } + + @Override + public int compareTo(Block leftBlock, int leftPosition, Block rightBlock, int rightPosition) + { + float 
leftValue = leftBlock.getFloat(leftPosition, 0); + float rightValue = rightBlock.getFloat(rightPosition, 0); + return Double.compare(leftValue, rightValue); + } + + @Override + public void appendTo(Block block, int position, BlockBuilder blockBuilder) + { + if (block.isNull(position)) { + blockBuilder.appendNull(); + } + else { + blockBuilder.writeFloat(block.getFloat(position, 0)).closeEntry(); + } + } + + @Override + public double getDouble(Block block, int position) + { + return block.getFloat(position, 0); + } + + @Override + public void writeDouble(BlockBuilder blockBuilder, double value) + { + blockBuilder.writeFloat((float)value).closeEntry(); + } +} \ No newline at end of file diff --git a/metacat-common-server/src/main/java/com/facebook/presto/type/IntType.java b/metacat-common-server/src/main/java/com/facebook/presto/type/IntType.java new file mode 100644 index 000000000..ac0e34788 --- /dev/null +++ b/metacat-common-server/src/main/java/com/facebook/presto/type/IntType.java @@ -0,0 +1,80 @@ +package com.facebook.presto.type; + +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.block.Block; +import com.facebook.presto.spi.block.BlockBuilder; +import com.facebook.presto.spi.type.AbstractFixedWidthType; + +import static com.facebook.presto.spi.type.TypeSignature.parseTypeSignature; +import static io.airlift.slice.SizeOf.SIZE_OF_INT; + +/** + * Created by amajumdar on 6/15/15. + */ +public final class IntType extends AbstractFixedWidthType { + public static final IntType INT = new IntType(); + public static final String TYPE = "int"; + + private IntType() { + super(parseTypeSignature(TYPE), int.class, SIZE_OF_INT); + } + + @Override + public boolean isComparable() { + return true; + } + + @Override + public boolean isOrderable() { + return true; + } + + @Override + public Object getObjectValue(ConnectorSession session, Block block, int position) { + if (block.isNull(position)) { + return null; + } + + return block.getInt(position, 0); + } + + @Override + public boolean equalTo(Block leftBlock, int leftPosition, Block rightBlock, int rightPosition) { + long leftValue = leftBlock.getInt(leftPosition, 0); + long rightValue = rightBlock.getInt(rightPosition, 0); + return leftValue == rightValue; + } + + @Override + public int hash(Block block, int position) { + long value = block.getInt(position, 0); + return (int) (value ^ (value >>> 32)); + } + + @Override + @SuppressWarnings("SuspiciousNameCombination") + public int compareTo(Block leftBlock, int leftPosition, Block rightBlock, int rightPosition) { + int leftValue = leftBlock.getInt(leftPosition, 0); + int rightValue = rightBlock.getInt(rightPosition, 0); + return Integer.compare(leftValue, rightValue); + } + + @Override + public void appendTo(Block block, int position, BlockBuilder blockBuilder) { + if (block.isNull(position)) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeInt(block.getInt(position, 0)).closeEntry(); + } + } + + @Override + public long getLong(Block block, int position) { + return block.getInt(position, 0); + } + + @Override + public void writeLong(BlockBuilder blockBuilder, long value) { + blockBuilder.writeInt((int)value).closeEntry(); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/model/Lookup.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/model/Lookup.java new file mode 100644 index 000000000..b6f8d0afd --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/model/Lookup.java @@ -0,0 
+1,103 @@ +package com.netflix.metacat.common.model; + +import com.netflix.metacat.common.server.Config; + +import javax.inject.Inject; +import java.util.Date; +import java.util.Set; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 6/30/15. + */ +public class Lookup { + private static Config config; + private Long id; + private String name; + private String type = "string"; + private Set values; + private Date dateCreated; + private Date lastUpdated; + private String createdBy; + private String lastUpdatedBy; + + public Lookup() { + checkNotNull(config, "config should have been set in the static setConfig"); + createdBy = lastUpdatedBy = config.getLookupServiceUserAdmin(); + } + + /** + * This must be called statically to set the config before the class can be used. + * + * @param config the metacat configuration + */ + @Inject + public static void setConfig(Config config) { + Lookup.config = config; + } + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Set getValues() { + return values; + } + + public void setValues(Set values) { + this.values = values; + } + + public Date getDateCreated() { + return dateCreated; + } + + public void setDateCreated(Date dateCreated) { + this.dateCreated = dateCreated; + } + + public Date getLastUpdated() { + return lastUpdated; + } + + public void setLastUpdated(Date lastUpdated) { + this.lastUpdated = lastUpdated; + } + + public String getCreatedBy() { + return createdBy; + } + + public void setCreatedBy(String createdBy) { + this.createdBy = createdBy; + } + + public String getLastUpdatedBy() { + return lastUpdatedBy; + } + + public void setLastUpdatedBy(String lastUpdatedBy) { + this.lastUpdatedBy = lastUpdatedBy; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/model/TagItem.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/model/TagItem.java new file mode 100644 index 000000000..bee7fc27a --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/model/TagItem.java @@ -0,0 +1,94 @@ +package com.netflix.metacat.common.model; + +import com.netflix.metacat.common.server.Config; + +import javax.inject.Inject; +import java.util.Date; +import java.util.Set; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 6/30/15. + */ +public class TagItem { + private static Config config; + private Long id; + private String name; + private Set values; + private Date dateCreated; + private Date lastUpdated; + private String createdBy; + private String lastUpdatedBy; + + public TagItem() { + checkNotNull(config, "config should have been set in the static setConfig"); + createdBy = lastUpdatedBy = config.getLookupServiceUserAdmin(); + } + + /** + * This must be called statically to set the config before the class can be used. 
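+ * For example, a Guice module could call {@code binder.requestStaticInjection(TagItem.class)} so
+ * that the injector populates it at startup (an assumption; the actual binding is not shown here).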
+ * + * @param config the metacat configuration + */ + @Inject + public static void setConfig(Config config) { + TagItem.config = config; + } + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Set getValues() { + return values; + } + + public void setValues(Set values) { + this.values = values; + } + + public Date getDateCreated() { + return dateCreated; + } + + public void setDateCreated(Date dateCreated) { + this.dateCreated = dateCreated; + } + + public Date getLastUpdated() { + return lastUpdated; + } + + public void setLastUpdated(Date lastUpdated) { + this.lastUpdated = lastUpdated; + } + + public String getCreatedBy() { + return createdBy; + } + + public void setCreatedBy(String createdBy) { + this.createdBy = createdBy; + } + + public String getLastUpdatedBy() { + return lastUpdatedBy; + } + + public void setLastUpdatedBy(String lastUpdatedBy) { + this.lastUpdatedBy = lastUpdatedBy; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/CounterWrapper.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/CounterWrapper.java new file mode 100644 index 000000000..becd9abe2 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/CounterWrapper.java @@ -0,0 +1,48 @@ +package com.netflix.metacat.common.monitoring; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.netflix.servo.DefaultMonitorRegistry; +import com.netflix.servo.monitor.Counter; +import com.netflix.servo.monitor.Monitors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import java.util.concurrent.ExecutionException; + +/** + * Servo counter wrapper + * + * @author amajumdar + */ +public class CounterWrapper { + private static final LoadingCache COUNTERS = CacheBuilder.newBuilder() + .build( + new CacheLoader() { + public Counter load(@Nonnull String counterName) { + Counter counter = Monitors.newCounter(counterName); + DefaultMonitorRegistry.getInstance().register(counter); + return counter; + } + }); + private static final Logger log = LoggerFactory.getLogger(CounterWrapper.class); + + public static void incrementCounter(String counterName, long incrementAmount) { + try { + Counter counter = COUNTERS.get(counterName); + if (incrementAmount == 1) { + counter.increment(); + } else { + counter.increment(incrementAmount); + } + } catch (ExecutionException ex) { + log.warn("Error fetching counter: {}", counterName, ex); + } + } + + public static void incrementCounter(String counterName) { + incrementCounter(counterName, 1); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/DynamicGauge.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/DynamicGauge.java new file mode 100644 index 000000000..10712d021 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/DynamicGauge.java @@ -0,0 +1,140 @@ +package com.netflix.metacat.common.monitoring; + +import com.google.common.base.MoreObjects; +import com.google.common.base.Throwables; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import 
com.google.common.collect.ImmutableList; +import com.netflix.servo.DefaultMonitorRegistry; +import com.netflix.servo.monitor.CompositeMonitor; +import com.netflix.servo.monitor.DoubleGauge; +import com.netflix.servo.monitor.Monitor; +import com.netflix.servo.monitor.MonitorConfig; +import com.netflix.servo.monitor.Monitors; +import com.netflix.servo.tag.TagList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import java.util.List; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Utility class that dynamically creates gauges based on an arbitrary (name, tagList), or {@link com.netflix.servo.monitor.MonitorConfig} + * Gauges are automatically expired after 15 minutes of inactivity. + */ +public final class DynamicGauge implements CompositeMonitor { + private static final Logger log = LoggerFactory.getLogger(DynamicGauge.class); + private static final String DEFAULT_EXPIRATION = "15"; + private static final String DEFAULT_EXPIRATION_UNIT = "MINUTES"; + private static final String CLASS_NAME = DynamicGauge.class.getCanonicalName(); + private static final String EXPIRATION_PROP = CLASS_NAME + ".expiration"; + private static final String EXPIRATION_PROP_UNIT = CLASS_NAME + ".expirationUnit"; + private static final String INTERNAL_ID = "servoGauges"; + private static final String CACHE_MONITOR_ID = "servoGaugesCache"; + private static final MonitorConfig BASE_CONFIG = new MonitorConfig.Builder(INTERNAL_ID).build(); + + private static final DynamicGauge INSTANCE = new DynamicGauge(); + + private final LoadingCache gauges; + private final CompositeMonitor cacheMonitor; + + private DynamicGauge() { + final String expiration = System.getProperty(EXPIRATION_PROP, DEFAULT_EXPIRATION); + final String expirationUnit = System.getProperty(EXPIRATION_PROP_UNIT, DEFAULT_EXPIRATION_UNIT); + final long expirationValue = Long.parseLong(expiration); + final TimeUnit expirationUnitValue = TimeUnit.valueOf(expirationUnit); + + gauges = CacheBuilder.newBuilder() + .expireAfterAccess(expirationValue, expirationUnitValue) + .build(new CacheLoader() { + @Override + public DoubleGauge load(@Nonnull final MonitorConfig config) throws Exception { + return new DoubleGauge(config); + } + }); + cacheMonitor = Monitors.newCacheMonitor(CACHE_MONITOR_ID, gauges); + DefaultMonitorRegistry.getInstance().register(this); + } + + /** + * Set a gauge based on a given {@link MonitorConfig} by a given value. + * + * @param config The monitoring config + * @param value The amount added to the current value + */ + public static void set(MonitorConfig config, double value) { + INSTANCE.get(config).set(value); + } + + /** + * Increment a gauge specified by a name. + */ + public static void set(String name, double value) { + set(MonitorConfig.builder(name).build(), value); + } + + /** + * Set the gauge for a given name, tagList by a given value. 
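+ * For example (hypothetical tag values; {@code BasicTagList} is Servo's standard implementation):
+ * <pre>
+ * TagList tags = BasicTagList.of("catalog", "prod");
+ * DynamicGauge.set("dse.metacat.gaugeAddPartitions", tags, 100.0);
+ * </pre>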
+ */ + public static void set(String name, TagList list, double value) { + final MonitorConfig config = MonitorConfig.builder(name).withTags(list).build(); + set(config, value); + } + + private DoubleGauge get(MonitorConfig config) { + try { + return gauges.get(config); + } catch (ExecutionException e) { + log.error("Failed to get a gauge for {}: {}", config, e.getMessage()); + throw Throwables.propagate(e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public List> getMonitors() { + final ConcurrentMap gaugesMap = gauges.asMap(); + return ImmutableList.copyOf(gaugesMap.values()); + } + + /** + * {@inheritDoc} + */ + @Override + public Long getValue() { + return (long) gauges.asMap().size(); + } + + @Override + public Long getValue(int pollerIndex) { + return getValue(); + } + + /** + * {@inheritDoc} + */ + @Override + public MonitorConfig getConfig() { + return BASE_CONFIG; + } + + /** + * {@inheritDoc} + */ + @Override + public String toString() { + ConcurrentMap map = gauges.asMap(); + return MoreObjects.toStringHelper(this) + .add("baseConfig", BASE_CONFIG) + .add("totalGauges", map.size()) + .add("gauges", map) + .toString(); + } +} + diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/LogConstants.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/LogConstants.java new file mode 100644 index 000000000..d60ac0183 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/LogConstants.java @@ -0,0 +1,104 @@ +package com.netflix.metacat.common.monitoring; + +/** + * Created by amajumdar on 11/4/14. + */ +public enum LogConstants { + /* + General logging constants + */ + GlobalPrefix("dse"), + AppPrefix(GlobalPrefix + ".metacat"), + /* + Counters + */ + CounterCreateCatalog(AppPrefix+".countCreateCatalog"), + CounterCreateTable(AppPrefix+".countCreateTable"), + CounterCreateDatabase(AppPrefix + ".countCreateDatabase"), + CounterCreateMView(AppPrefix + ".countCreateMView"), + CounterDeleteDatabase(AppPrefix + ".countDeleteDatabase"), + CounterDeleteTablePartitions(AppPrefix + ".countDeleteTablePartitions"), + CounterDeleteMViewPartitions(AppPrefix + ".countDeleteMViewPartitions"), + CounterDeleteTable(AppPrefix + ".countDropTable"), + CounterDeleteMView(AppPrefix + ".countDeleteMView"), + CounterGetCatalog(AppPrefix+".countGetMetadata"), + CounterGetCatalogNames(AppPrefix+".countGetCatalogNames"), + CounterGetDatabase(AppPrefix+".countGetDatabase"), + CounterGetMViewPartitions(AppPrefix+".countGetMViewPartitions"), + CounterGetTablePartitions(AppPrefix+".countGetTablePartitions"), + CounterGetTable(AppPrefix+".countGetTable"), + CounterGetMView(AppPrefix+".countGetMView"), + CounterGetCatalogMViews(AppPrefix+".countGetCatalogMViews"), + CounterGetTableMViews(AppPrefix+".countGetTableMViews"), + CounterRenameTable(AppPrefix+".countRenameTable"), + CounterUpdateCatalog(AppPrefix+".countUpdateCatalog"), + CounterUpdateTable(AppPrefix+".countUpdateTable"), + CounterSaveTablePartitions(AppPrefix+".countSaveTablePartitions"), + CounterSaveMViewPartitions(AppPrefix+".countSaveMViewPartitions"), + CounterCreateCatalogFailure(AppPrefix+".countCreateCatalogFailure"), + CounterCreateTableFailure(AppPrefix+".countCreateTableFailure"), + CounterCreateDatabaseFailure(AppPrefix + ".countCreateDatabaseFailure"), + CounterCreateMViewFailure(AppPrefix + ".countCreateMViewFailure"), + CounterDeleteDatabaseFailure(AppPrefix + ".countDeleteDatabaseFailure"), + 
CounterDeleteTablePartitionsFailure(AppPrefix + ".countDeleteTablePartitionsFailure"), + CounterDeleteMViewPartitionsFailure(AppPrefix + ".countDeleteMViewPartitionsFailure"), + CounterDeleteTableFailure(AppPrefix + ".countDropTableFailure"), + CounterDeleteMViewFailure(AppPrefix + ".countDeleteMViewFailure"), + CounterGetCatalogFailure(AppPrefix+".countGetMetadataFailure"), + CounterGetCatalogNamesFailure(AppPrefix+".countGetCatalogNamesFailure"), + CounterGetDatabaseFailure(AppPrefix+".countGetDatabaseFailure"), + CounterGetMViewPartitionsFailure(AppPrefix+".countGetMViewPartitionsFailure"), + CounterGetTablePartitionsFailure(AppPrefix+".countGetTablePartitionsFailure"), + CounterGetTableFailure(AppPrefix+".countGetTableFailure"), + CounterGetMViewFailure(AppPrefix+".countGetMViewFailure"), + CounterGetCatalogMViewsFailure(AppPrefix+".countGetCatalogMViewsFailure"), + CounterGetTableMViewsFailure(AppPrefix+".countGetTableMViewsFailure"), + CounterRenameTableFailure(AppPrefix+".countRenameTableFailure"), + CounterUpdateCatalogFailure(AppPrefix+".countUpdateCatalogFailure"), + CounterUpdateTableFailure(AppPrefix+".countUpdateTableFailure"), + CounterSaveTablePartitionsFailure(AppPrefix+".countSaveTablePartitionsFailure"), + CounterSaveMViewPartitionsFailure(AppPrefix+".countSaveMViewPartitionsFailure"), + /* + Tracers + */ + TracerCreateCatalog(AppPrefix+".traceCreateCatalog"), + TracerCreateTable(AppPrefix+".traceCreateTable"), + TracerCreateDatabase(AppPrefix + ".traceCreateDatabase"), + TracerCreateMView(AppPrefix + ".traceCreateMView"), + TracerDeleteDatabase(AppPrefix + ".traceDeleteDatabase"), + TracerDeleteTablePartitions(AppPrefix + ".traceDeleteTablePartitions"), + TracerDeleteMViewPartitions(AppPrefix + ".traceDeleteMViewPartitions"), + TracerDeleteTable(AppPrefix + ".traceDropTable"), + TracerDeleteMView(AppPrefix + ".traceDeleteMView"), + TracerGetCatalog(AppPrefix+".traceGetMetadata"), + TracerGetCatalogNames(AppPrefix+".traceGetCatalogNames"), + TracerGetDatabase(AppPrefix+".traceGetDatabase"), + TracerGetMViewPartitions(AppPrefix+".traceGetMViewPartitions"), + TracerGetTablePartitions(AppPrefix+".traceGetTablePartitions"), + TracerGetTable(AppPrefix+".traceGetTable"), + TracerGetMView(AppPrefix+".traceGetMView"), + TracerGetCatalogMViews(AppPrefix+".traceGetCatalogMViews"), + TracerGetTableMViews(AppPrefix+".traceGetTableMViews"), + TracerRenameTable(AppPrefix+".traceRenameTable"), + TracerUpdateCatalog(AppPrefix+".traceUpdateCatalog"), + TracerUpdateTable(AppPrefix+".traceUpdateTable"), + TracerSaveTablePartitions(AppPrefix+".traceSaveTablePartitions"), + TracerSaveMViewPartitions(AppPrefix+".traceSaveMViewPartitions"), + /* + Gauges + */ + GaugeAddPartitions(AppPrefix+".gaugeAddPartitions"), + GaugeDeletePartitions(AppPrefix+".gaugeDeletePartitions"), + GaugeGetPartitionsCount(AppPrefix+".gaugeGetPartitionsCount"); + + private final String constant; + + LogConstants(String constant) { + this.constant = constant; + } + + @Override + public String toString() { + return constant; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/TimerWrapper.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/TimerWrapper.java new file mode 100644 index 000000000..b2153b3ee --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/monitoring/TimerWrapper.java @@ -0,0 +1,131 @@ +package com.netflix.metacat.common.monitoring; + +import com.google.common.cache.CacheBuilder; +import 
com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.netflix.servo.DefaultMonitorRegistry; +import com.netflix.servo.monitor.MonitorConfig; +import com.netflix.servo.monitor.Monitors; +import com.netflix.servo.monitor.Stopwatch; +import com.netflix.servo.monitor.Timer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Servo Timer wrapper + * + * @author amajumdar + */ +public class TimerWrapper { + private static final Stopwatch NULL_STOPWATCH = new Stopwatch() { + @Override + public long getDuration(TimeUnit timeUnit) { + return 0; + } + + @Override + public long getDuration() { + return 0; + } + + @Override + public void reset() { + } + + @Override + public void start() { + } + + @Override + public void stop() { + } + }; + private static final Timer NULL_TIMER = new Timer() { + @Override + public MonitorConfig getConfig() { + return null; + } + + @Override + public TimeUnit getTimeUnit() { + return null; + } + + @Override + public Long getValue(int pollerIndex) { + return null; + } + + @Override + public Long getValue() { + return null; + } + + @Override + public void record(long duration, TimeUnit timeUnit) { + + } + + @Override + public void record(long duration) { + + } + + @Override + public Stopwatch start() { + return NULL_STOPWATCH; + } + }; + private static final LoadingCache TIMERS = CacheBuilder.newBuilder() + .build( + new CacheLoader() { + public Timer load(@Nonnull String timerName) { + Timer timer = Monitors.newTimer(timerName); + DefaultMonitorRegistry.getInstance().register(timer); + return timer; + } + }); + private static final Logger log = LoggerFactory.getLogger(TimerWrapper.class); + private final String name; + private final Timer timer; + private Stopwatch stopwatch; + + private TimerWrapper(String name) { + this.name = name; + Timer t = NULL_TIMER; + try { + t = TIMERS.get(name); + } catch (ExecutionException ex) { + log.warn("Error fetching timer: {}", name, ex); + } + this.timer = t; + } + + public static TimerWrapper createStarted(String name) { + TimerWrapper wrapper = new TimerWrapper(name); + wrapper.start(); + return wrapper; + } + + public static TimerWrapper createStopped(String name) { + return new TimerWrapper(name); + } + + public void start() { + stopwatch = timer.start(); + } + + public long stop() { + stopwatch.stop(); + return stopwatch.getDuration(TimeUnit.MILLISECONDS); + } + + @Override + public String toString() { + return "Timer{" + name + " - " + stopwatch.getDuration(TimeUnit.MILLISECONDS) + "ms}"; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTAND.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTAND.java new file mode 100644 index 000000000..d037136fd --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTAND.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTAND.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTAND extends SimpleNode { + public ASTAND(int id) { + super(id); + } + + public ASTAND(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. 
**/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=47624dd380cfe00f384e5c8af03d69b3 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTEQ.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTEQ.java new file mode 100644 index 000000000..21285dd50 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTEQ.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTEQ.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTEQ extends SimpleNode { + public ASTEQ(int id) { + super(id); + } + + public ASTEQ(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=bbf13f81c94ea7197914ce9f46cc3526 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTEVAL.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTEVAL.java new file mode 100644 index 000000000..db6eac3d6 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTEVAL.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTEVAL.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTEVAL extends SimpleNode { + public ASTEVAL(int id) { + super(id); + } + + public ASTEVAL(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=44c4f3e64c75fbaff63b52dea370f72e (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTFILTER.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTFILTER.java new file mode 100644 index 000000000..c64814026 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTFILTER.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTFILTER.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTFILTER extends SimpleNode { + public ASTFILTER(int id) { + super(id); + } + + public ASTFILTER(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. 
**/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=502a3e691142a2ee92a5d005f0a1bb28 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTGT.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTGT.java new file mode 100644 index 000000000..01bdcc422 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTGT.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTGT.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTGT extends SimpleNode { + public ASTGT(int id) { + super(id); + } + + public ASTGT(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=97fa861ee8d9421ccb94612e513bc388 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTGTE.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTGTE.java new file mode 100644 index 000000000..21e9c32dc --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTGTE.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTGTE.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTGTE extends SimpleNode { + public ASTGTE(int id) { + super(id); + } + + public ASTGTE(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=64550e3fbfe981b802deef725b683e1a (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLIKE.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLIKE.java new file mode 100644 index 000000000..9bf81a777 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLIKE.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTLIKE.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTLIKE extends SimpleNode { + public ASTLIKE(int id) { + super(id); + } + + public ASTLIKE(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. 
**/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=4c9049dc18265f1076e67d7fbab0250d (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLT.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLT.java new file mode 100644 index 000000000..714f579fd --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLT.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTLT.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTLT extends SimpleNode { + public ASTLT(int id) { + super(id); + } + + public ASTLT(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=32e29fc080692c40e10bb780be9e3e00 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLTE.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLTE.java new file mode 100644 index 000000000..4e2e6f009 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTLTE.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTLTE.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTLTE extends SimpleNode { + public ASTLTE(int id) { + super(id); + } + + public ASTLTE(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=5166b2cf4d389162d081829f20e53bb9 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTMATCHES.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTMATCHES.java new file mode 100644 index 000000000..6fe9d4569 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTMATCHES.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTMATCHES.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTMATCHES extends SimpleNode { + public ASTMATCHES(int id) { + super(id); + } + + public ASTMATCHES(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. 
**/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=db244b83ab6cf6833b7dbd96aea93a2c (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNEQ.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNEQ.java new file mode 100644 index 000000000..32ee4de42 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNEQ.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTNEQ.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTNEQ extends SimpleNode { + public ASTNEQ(int id) { + super(id); + } + + public ASTNEQ(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=4ed9b560c660a3074aeba567abf5c70b (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNEVAL.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNEVAL.java new file mode 100644 index 000000000..3d0dce047 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNEVAL.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTNEVAL.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTNEVAL extends SimpleNode { + public ASTNEVAL(int id) { + super(id); + } + + public ASTNEVAL(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=751b8701055f6f9fe23ea83e1e531f14 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNUM.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNUM.java new file mode 100644 index 000000000..b04091629 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTNUM.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTNUM.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTNUM extends SimpleNode { + public ASTNUM(int id) { + super(id); + } + + public ASTNUM(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. 
**/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=8c978f5115fa4de48e54d30148a58bf1 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTOR.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTOR.java new file mode 100644 index 000000000..897218c7c --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTOR.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTOR.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTOR extends SimpleNode { + public ASTOR(int id) { + super(id); + } + + public ASTOR(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=b1f74c0c73a8c4b265e886c9e24da36a (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTSTRING.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTSTRING.java new file mode 100644 index 000000000..1226c7710 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTSTRING.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTSTRING.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTSTRING extends SimpleNode { + public ASTSTRING(int id) { + super(id); + } + + public ASTSTRING(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=8d75dfe50ee57545277a64f4f0a63221 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTVAR.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTVAR.java new file mode 100644 index 000000000..c61b52199 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ASTVAR.java @@ -0,0 +1,23 @@ +/* Generated By:JJTree: Do not edit this line. ASTVAR.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class ASTVAR extends SimpleNode { + public ASTVAR(int id) { + super(id); + } + + public ASTVAR(PartitionParser p, int id) { + super(p, id); + } + + + /** Accept the visitor. 
**/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) { + + return + visitor.visit(this, data); + } +} +/* JavaCC - OriginalChecksum=69622d5c2212551b77e16c4e9145bc5c (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/JJTPartitionParserState.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/JJTPartitionParserState.java new file mode 100644 index 000000000..aaacd8765 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/JJTPartitionParserState.java @@ -0,0 +1,123 @@ +/* Generated By:JavaCC: Do not edit this line. JJTPartitionParserState.java Version 6.1_2 */ +package com.netflix.metacat.common.partition.parser; + +public class JJTPartitionParserState { + private java.util.List nodes; + private java.util.List marks; + + private int sp; // number of nodes on stack + private int mk; // current mark + private boolean node_created; + + public JJTPartitionParserState() { + nodes = new java.util.ArrayList(); + marks = new java.util.ArrayList(); + sp = 0; + mk = 0; + } + + /* Determines whether the current node was actually closed and + pushed. This should only be called in the final user action of a + node scope. */ + public boolean nodeCreated() { + return node_created; + } + + /* Call this to reinitialize the node stack. It is called + automatically by the parser's ReInit() method. */ + public void reset() { + nodes.clear(); + marks.clear(); + sp = 0; + mk = 0; + } + + /* Returns the root node of the AST. It only makes sense to call + this after a successful parse. */ + public Node rootNode() { + return nodes.get(0); + } + + /* Pushes a node on to the stack. */ + public void pushNode(Node n) { + nodes.add(n); + ++sp; + } + + /* Returns the node on the top of the stack, and remove it from the + stack. */ + public Node popNode() { + if (--sp < mk) { + mk = marks.remove(marks.size()-1); + } + return nodes.remove(nodes.size()-1); + } + + /* Returns the node currently on the top of the stack. */ + public Node peekNode() { + return nodes.get(nodes.size()-1); + } + + /* Returns the number of children on the stack in the current node + scope. */ + public int nodeArity() { + return sp - mk; + } + + + public void clearNodeScope(Node n) { + while (sp > mk) { + popNode(); + } + mk = marks.remove(marks.size()-1); + } + + + public void openNodeScope(Node n) { + marks.add(mk); + mk = sp; + n.jjtOpen(); + } + + + /* A definite node is constructed from a specified number of + children. That number of nodes are popped from the stack and + made the children of the definite node. Then the definite node + is pushed on to the stack. */ + public void closeNodeScope(Node n, int num) { + mk = marks.remove(marks.size()-1); + while (num-- > 0) { + Node c = popNode(); + c.jjtSetParent(n); + n.jjtAddChild(c, num); + } + n.jjtClose(); + pushNode(n); + node_created = true; + } + + + /* A conditional node is constructed if its condition is true. All + the nodes that have been pushed since the node was opened are + made children of the conditional node, which is then pushed + on to the stack. If the condition is false the node is not + constructed and they are left on the stack. 
*/ + public void closeNodeScope(Node n, boolean condition) { + if (condition) { + int a = nodeArity(); + mk = marks.remove(marks.size()-1); + while (a-- > 0) { + Node c = popNode(); + c.jjtSetParent(n); + n.jjtAddChild(c, a); + } + n.jjtClose(); + pushNode(n); + node_created = true; + } else { + mk = marks.remove(marks.size()-1); + node_created = false; + } + } +} +/* JavaCC - OriginalChecksum=15653e080246fc9384199e756e610179 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Node.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Node.java new file mode 100644 index 000000000..770a00ce0 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Node.java @@ -0,0 +1,41 @@ +/* Generated By:JJTree: Do not edit this line. Node.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +/* All AST nodes must implement this interface. It provides basic + machinery for constructing the parent and child relationships + between nodes. */ + +public +interface Node { + + /** This method is called after the node has been made the current + node. It indicates that child nodes can now be added to it. */ + public void jjtOpen(); + + /** This method is called after all the child nodes have been + added. */ + public void jjtClose(); + + /** This pair of methods are used to inform the node of its + parent. */ + public void jjtSetParent(Node n); + public Node jjtGetParent(); + + /** This method tells the node to add its argument to the node's + list of children. */ + public void jjtAddChild(Node n, int i); + + /** This method returns a child node. The children are numbered + from zero, left to right. */ + public Node jjtGetChild(int i); + + /** Return the number of children the node has. */ + public int jjtGetNumChildren(); + + public int getId(); + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data); +} +/* JavaCC - OriginalChecksum=334ed1ebebd1735ac4fc4c275c58338c (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ParseException.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ParseException.java new file mode 100644 index 000000000..97af41160 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/ParseException.java @@ -0,0 +1,193 @@ +/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 6.1 */ +/* JavaCCOptions:KEEP_LINE_COLUMN=true */ +package com.netflix.metacat.common.partition.parser; + +/** + * This exception is thrown when parse errors are encountered. + * You can explicitly create objects of this exception type by + * calling the method generateParseException in the generated + * parser. + * + * You can modify this class to customize your error reporting + * mechanisms so long as you retain the public fields. + */ +public class ParseException extends Exception { + + /** + * The version identifier for this Serializable class. + * Increment only if the serialized form of the + * class changes. + */ + private static final long serialVersionUID = 1L; + + /** + * The end of line string for this machine. 
+   */
+  protected static String EOL = System.getProperty("line.separator", "\n");
+
+  /**
+   * This constructor is used by the method "generateParseException"
+   * in the generated parser.  Calling this constructor generates
+   * a new object of this type with the fields "currentToken",
+   * "expectedTokenSequences", and "tokenImage" set.
+   */
+  public ParseException(Token currentTokenVal,
+                        int[][] expectedTokenSequencesVal,
+                        String[] tokenImageVal
+                       )
+  {
+    super(initialise(currentTokenVal, expectedTokenSequencesVal, tokenImageVal));
+    currentToken = currentTokenVal;
+    expectedTokenSequences = expectedTokenSequencesVal;
+    tokenImage = tokenImageVal;
+  }
+
+  /**
+   * The following constructors are for use by you for whatever
+   * purpose you can think of.  Constructing the exception in this
+   * manner makes the exception behave in the normal way - i.e., as
+   * documented in the class "Throwable".  The fields "errorToken",
+   * "expectedTokenSequences", and "tokenImage" do not contain
+   * relevant information.  The JavaCC generated code does not use
+   * these constructors.
+   */
+
+  public ParseException() {
+    super();
+  }
+
+  /** Constructor with message. */
+  public ParseException(String message) {
+    super(message);
+  }
+
+
+  /**
+   * This is the last token that has been consumed successfully.  If
+   * this object has been created due to a parse error, the token
+   * following this token will (therefore) be the first error token.
+   */
+  public Token currentToken;
+
+  /**
+   * Each entry in this array is an array of integers.  Each array
+   * of integers represents a sequence of tokens (by their ordinal
+   * values) that is expected at this point of the parse.
+   */
+  public int[][] expectedTokenSequences;
+
+  /**
+   * This is a reference to the "tokenImage" array of the generated
+   * parser within which the parse error occurred.  This array is
+   * defined in the generated ...Constants interface.
+   */
+  public String[] tokenImage;
+
+  /**
+   * It uses "currentToken" and "expectedTokenSequences" to generate a parse
+   * error message and returns it.  If this object has been created
+   * due to a parse error, and you do not catch it (it gets thrown
+   * from the parser) the correct error message
+   * gets displayed.
+   */
+  private static String initialise(Token currentToken,
+                                   int[][] expectedTokenSequences,
+                                   String[] tokenImage) {
+
+    StringBuffer expected = new StringBuffer();
+    int maxSize = 0;
+    for (int i = 0; i < expectedTokenSequences.length; i++) {
+      if (maxSize < expectedTokenSequences[i].length) {
+        maxSize = expectedTokenSequences[i].length;
+      }
+      for (int j = 0; j < expectedTokenSequences[i].length; j++) {
+        expected.append(tokenImage[expectedTokenSequences[i][j]]).append(' ');
+      }
+      if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) {
+        expected.append("...");
+      }
+      expected.append(EOL).append("    ");
+    }
+    String retval = "Encountered \"";
+    Token tok = currentToken.next;
+    for (int i = 0; i < maxSize; i++) {
+      if (i != 0) retval += " ";
+      if (tok.kind == 0) {
+        retval += tokenImage[0];
+        break;
+      }
+      retval += " " + tokenImage[tok.kind];
+      retval += " \"";
+      retval += add_escapes(tok.image);
+      retval += " \"";
+      tok = tok.next;
+    }
+    retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn;
+    retval += "." + EOL;
+
+
+    if (expectedTokenSequences.length == 0) {
+      // Nothing to add here
+    } else {
+      if (expectedTokenSequences.length == 1) {
+        retval += "Was expecting:" + EOL + "    ";
+      } else {
+        retval += "Was expecting one of:" + EOL + "    ";
+      }
+      retval += expected.toString();
+    }
+
+    return retval;
+  }
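Callers normally meet this machinery only through getMessage(). A minimal usage sketch (illustrative only, not part of this change; the truncated filter string is made up) against the PartitionParser added later in this diff:

    package com.netflix.metacat.common.partition.parser;

    import java.io.StringReader;

    class ParseErrorDemo {
        public static void main(String[] args) {
            // "dateint >=" is deliberately incomplete, so filter() fails at end of input.
            PartitionParser parser = new PartitionParser(new StringReader("dateint >="));
            try {
                parser.filter();
            } catch (ParseException e) {
                // Assembled by initialise() above: the offending token, its line
                // and column, and the token images that were expected instead.
                System.err.println(e.getMessage());
            }
        }
    }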
+  /**
+   * Used to convert raw characters to their escaped version
+   * when these raw versions cannot be used as part of an ASCII
+   * string literal.
+   */
+  static String add_escapes(String str) {
+    StringBuffer retval = new StringBuffer();
+    char ch;
+    for (int i = 0; i < str.length(); i++) {
+      switch (str.charAt(i))
+      {
+        case '\b':
+          retval.append("\\b");
+          continue;
+        case '\t':
+          retval.append("\\t");
+          continue;
+        case '\n':
+          retval.append("\\n");
+          continue;
+        case '\f':
+          retval.append("\\f");
+          continue;
+        case '\r':
+          retval.append("\\r");
+          continue;
+        case '\"':
+          retval.append("\\\"");
+          continue;
+        case '\'':
+          retval.append("\\\'");
+          continue;
+        case '\\':
+          retval.append("\\\\");
+          continue;
+        default:
+          if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) {
+            String s = "0000" + Integer.toString(ch, 16);
+            retval.append("\\u" + s.substring(s.length() - 4, s.length()));
+          } else {
+            retval.append(ch);
+          }
+          continue;
+      }
+    }
+    return retval.toString();
+  }
+
+}
+/* JavaCC - OriginalChecksum=ebf7c49bc9f512da02492cb828cad7f5 (do not edit this line) */
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.java
new file mode 100644
index 000000000..b34b18ad4
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.java
@@ -0,0 +1,752 @@
+/* PartitionParser.java */
+/* Generated By:JJTree&JavaCC: Do not edit this line.
PartitionParser.java */ +package com.netflix.metacat.common.partition.parser; +public class PartitionParser/*@bgen(jjtree)*/implements PartitionParserTreeConstants, PartitionParserConstants {/*@bgen(jjtree)*/ + protected JJTPartitionParserState jjtree = new JJTPartitionParserState();public static void main (String args []) throws ParseException { + PartitionParser parser = new PartitionParser(new java.io.StringReader(args[0])); + SimpleNode root = parser.filter(); + root.dump(""); + System.out.println(root.jjtAccept(new com.netflix.metacat.common.partition.visitor.PartitionParserEval(), null)); + } + + final public SimpleNode filter() throws ParseException {/*@bgen(jjtree) FILTER */ + ASTFILTER jjtn000 = new ASTFILTER(JJTFILTER); + boolean jjtc000 = true; + jjtree.openNodeScope(jjtn000); + try { + expr(); +jjtree.closeNodeScope(jjtn000, true); + jjtc000 = false; +{if ("" != null) return jjtn000;} + } catch (Throwable jjte000) { +if (jjtc000) { + jjtree.clearNodeScope(jjtn000); + jjtc000 = false; + } else { + jjtree.popNode(); + } + if (jjte000 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte000;} + } + if (jjte000 instanceof ParseException) { + {if (true) throw (ParseException)jjte000;} + } + {if (true) throw (Error)jjte000;} + } finally { +if (jjtc000) { + jjtree.closeNodeScope(jjtn000, true); + } + } + throw new Error("Missing return statement in function"); + } + + final public void expr() throws ParseException { + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case LPAREN:{ + jj_consume_token(LPAREN); + expr(); + jj_consume_token(RPAREN); + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case AND: + case OR:{ + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case AND:{ + jj_consume_token(AND); +ASTAND jjtn001 = new ASTAND(JJTAND); + boolean jjtc001 = true; + jjtree.openNodeScope(jjtn001); + try { + expr(); + } catch (Throwable jjte001) { +if (jjtc001) { + jjtree.clearNodeScope(jjtn001); + jjtc001 = false; + } else { + jjtree.popNode(); + } + if (jjte001 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte001;} + } + if (jjte001 instanceof ParseException) { + {if (true) throw (ParseException)jjte001;} + } + {if (true) throw (Error)jjte001;} + } finally { +if (jjtc001) { + jjtree.closeNodeScope(jjtn001, 2); + } + } + break; + } + case OR:{ + jj_consume_token(OR); +ASTOR jjtn002 = new ASTOR(JJTOR); + boolean jjtc002 = true; + jjtree.openNodeScope(jjtn002); + try { + expr(); + } catch (Throwable jjte002) { +if (jjtc002) { + jjtree.clearNodeScope(jjtn002); + jjtc002 = false; + } else { + jjtree.popNode(); + } + if (jjte002 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte002;} + } + if (jjte002 instanceof ParseException) { + {if (true) throw (ParseException)jjte002;} + } + {if (true) throw (Error)jjte002;} + } finally { +if (jjtc002) { + jjtree.closeNodeScope(jjtn002, 2); + } + } + break; + } + default: + jj_la1[0] = jj_gen; + jj_consume_token(-1); + throw new ParseException(); + } + break; + } + default: + jj_la1[1] = jj_gen; + ; + } + break; + } + case NOT:{ + neval(); + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case AND: + case OR:{ + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case AND:{ + jj_consume_token(AND); +ASTAND jjtn003 = new ASTAND(JJTAND); + boolean jjtc003 = true; + jjtree.openNodeScope(jjtn003); + try { + expr(); + } catch (Throwable jjte003) { +if (jjtc003) { + jjtree.clearNodeScope(jjtn003); + jjtc003 = false; + } else { + jjtree.popNode(); + } + if (jjte003 instanceof RuntimeException) { + {if (true) throw 
(RuntimeException)jjte003;} + } + if (jjte003 instanceof ParseException) { + {if (true) throw (ParseException)jjte003;} + } + {if (true) throw (Error)jjte003;} + } finally { +if (jjtc003) { + jjtree.closeNodeScope(jjtn003, 2); + } + } + break; + } + case OR:{ + jj_consume_token(OR); +ASTOR jjtn004 = new ASTOR(JJTOR); + boolean jjtc004 = true; + jjtree.openNodeScope(jjtn004); + try { + expr(); + } catch (Throwable jjte004) { +if (jjtc004) { + jjtree.clearNodeScope(jjtn004); + jjtc004 = false; + } else { + jjtree.popNode(); + } + if (jjte004 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte004;} + } + if (jjte004 instanceof ParseException) { + {if (true) throw (ParseException)jjte004;} + } + {if (true) throw (Error)jjte004;} + } finally { +if (jjtc004) { + jjtree.closeNodeScope(jjtn004, 2); + } + } + break; + } + default: + jj_la1[2] = jj_gen; + jj_consume_token(-1); + throw new ParseException(); + } + break; + } + default: + jj_la1[3] = jj_gen; + ; + } + break; + } + case INT: + case FLOAT: + case VARIABLE: + case QUOTE: + case SQUOTE:{ + eval(); + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case AND: + case OR:{ + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case AND:{ + jj_consume_token(AND); +ASTAND jjtn005 = new ASTAND(JJTAND); + boolean jjtc005 = true; + jjtree.openNodeScope(jjtn005); + try { + expr(); + } catch (Throwable jjte005) { +if (jjtc005) { + jjtree.clearNodeScope(jjtn005); + jjtc005 = false; + } else { + jjtree.popNode(); + } + if (jjte005 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte005;} + } + if (jjte005 instanceof ParseException) { + {if (true) throw (ParseException)jjte005;} + } + {if (true) throw (Error)jjte005;} + } finally { +if (jjtc005) { + jjtree.closeNodeScope(jjtn005, 2); + } + } + break; + } + case OR:{ + jj_consume_token(OR); +ASTOR jjtn006 = new ASTOR(JJTOR); + boolean jjtc006 = true; + jjtree.openNodeScope(jjtn006); + try { + expr(); + } catch (Throwable jjte006) { +if (jjtc006) { + jjtree.clearNodeScope(jjtn006); + jjtc006 = false; + } else { + jjtree.popNode(); + } + if (jjte006 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte006;} + } + if (jjte006 instanceof ParseException) { + {if (true) throw (ParseException)jjte006;} + } + {if (true) throw (Error)jjte006;} + } finally { +if (jjtc006) { + jjtree.closeNodeScope(jjtn006, 2); + } + } + break; + } + default: + jj_la1[4] = jj_gen; + jj_consume_token(-1); + throw new ParseException(); + } + break; + } + default: + jj_la1[5] = jj_gen; + ; + } + break; + } + default: + jj_la1[6] = jj_gen; + jj_consume_token(-1); + throw new ParseException(); + } + } + + final public void neval() throws ParseException {/*@bgen(jjtree) NEVAL */ + ASTNEVAL jjtn000 = new ASTNEVAL(JJTNEVAL); + boolean jjtc000 = true; + jjtree.openNodeScope(jjtn000); + try { + jj_consume_token(NOT); + eval(); + } catch (Throwable jjte000) { +if (jjtc000) { + jjtree.clearNodeScope(jjtn000); + jjtc000 = false; + } else { + jjtree.popNode(); + } + if (jjte000 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte000;} + } + if (jjte000 instanceof ParseException) { + {if (true) throw (ParseException)jjte000;} + } + {if (true) throw (Error)jjte000;} + } finally { +if (jjtc000) { + jjtree.closeNodeScope(jjtn000, true); + } + } + } + + final public void eval() throws ParseException {/*@bgen(jjtree) EVAL */ + ASTEVAL jjtn000 = new ASTEVAL(JJTEVAL); + boolean jjtc000 = true; + jjtree.openNodeScope(jjtn000); + try { + term(); + comp(); + term(); + } catch (Throwable 
jjte000) { +if (jjtc000) { + jjtree.clearNodeScope(jjtn000); + jjtc000 = false; + } else { + jjtree.popNode(); + } + if (jjte000 instanceof RuntimeException) { + {if (true) throw (RuntimeException)jjte000;} + } + if (jjte000 instanceof ParseException) { + {if (true) throw (ParseException)jjte000;} + } + {if (true) throw (Error)jjte000;} + } finally { +if (jjtc000) { + jjtree.closeNodeScope(jjtn000, true); + } + } + } + + final public void comp() throws ParseException { + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case GT:{ +ASTGT jjtn001 = new ASTGT(JJTGT); + boolean jjtc001 = true; + jjtree.openNodeScope(jjtn001); + try { + jj_consume_token(GT); + } finally { +if (jjtc001) { + jjtree.closeNodeScope(jjtn001, true); + } + } + break; + } + case LT:{ +ASTLT jjtn002 = new ASTLT(JJTLT); + boolean jjtc002 = true; + jjtree.openNodeScope(jjtn002); + try { + jj_consume_token(LT); + } finally { +if (jjtc002) { + jjtree.closeNodeScope(jjtn002, true); + } + } + break; + } + case LTE:{ +ASTLTE jjtn003 = new ASTLTE(JJTLTE); + boolean jjtc003 = true; + jjtree.openNodeScope(jjtn003); + try { + jj_consume_token(LTE); + } finally { +if (jjtc003) { + jjtree.closeNodeScope(jjtn003, true); + } + } + break; + } + case GTE:{ +ASTGTE jjtn004 = new ASTGTE(JJTGTE); + boolean jjtc004 = true; + jjtree.openNodeScope(jjtn004); + try { + jj_consume_token(GTE); + } finally { +if (jjtc004) { + jjtree.closeNodeScope(jjtn004, true); + } + } + break; + } + case EQUAL:{ +ASTEQ jjtn005 = new ASTEQ(JJTEQ); + boolean jjtc005 = true; + jjtree.openNodeScope(jjtn005); + try { + jj_consume_token(EQUAL); + } finally { +if (jjtc005) { + jjtree.closeNodeScope(jjtn005, true); + } + } + break; + } + case NEQUAL:{ +ASTNEQ jjtn006 = new ASTNEQ(JJTNEQ); + boolean jjtc006 = true; + jjtree.openNodeScope(jjtn006); + try { + jj_consume_token(NEQUAL); + } finally { +if (jjtc006) { + jjtree.closeNodeScope(jjtn006, true); + } + } + break; + } + case MATCHES:{ +ASTMATCHES jjtn007 = new ASTMATCHES(JJTMATCHES); + boolean jjtc007 = true; + jjtree.openNodeScope(jjtn007); + try { + jj_consume_token(MATCHES); + } finally { +if (jjtc007) { + jjtree.closeNodeScope(jjtn007, true); + } + } + break; + } + case LIKE:{ +ASTLIKE jjtn008 = new ASTLIKE(JJTLIKE); + boolean jjtc008 = true; + jjtree.openNodeScope(jjtn008); + try { + jj_consume_token(LIKE); + } finally { +if (jjtc008) { + jjtree.closeNodeScope(jjtn008, true); + } + } + break; + } + default: + jj_la1[7] = jj_gen; + jj_consume_token(-1); + throw new ParseException(); + } + } + + final public void term() throws ParseException {Token t; + StringBuilder builder = new StringBuilder(); + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case INT:{ + t = jj_consume_token(INT); +ASTNUM jjtn001 = new ASTNUM(JJTNUM); + boolean jjtc001 = true; + jjtree.openNodeScope(jjtn001); + try { +jjtree.closeNodeScope(jjtn001, true); + jjtc001 = false; +jjtn001.value = new java.math.BigDecimal(t.image); + } finally { +if (jjtc001) { + jjtree.closeNodeScope(jjtn001, true); + } + } + break; + } + case FLOAT:{ + t = jj_consume_token(FLOAT); +ASTNUM jjtn002 = new ASTNUM(JJTNUM); + boolean jjtc002 = true; + jjtree.openNodeScope(jjtn002); + try { +jjtree.closeNodeScope(jjtn002, true); + jjtc002 = false; +jjtn002.value = new java.math.BigDecimal(t.image); + } finally { +if (jjtc002) { + jjtree.closeNodeScope(jjtn002, true); + } + } + break; + } + case QUOTE:{ + jj_consume_token(QUOTE); + label_1: + while (true) { + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case CHAR:{ + ; + break; + } + default: + jj_la1[8] = jj_gen; + break label_1; 
+ } + t = jj_consume_token(CHAR); +builder.append(t.image); + } + jj_consume_token(ENDQUOTE); +ASTSTRING jjtn003 = new ASTSTRING(JJTSTRING); + boolean jjtc003 = true; + jjtree.openNodeScope(jjtn003); + try { +jjtree.closeNodeScope(jjtn003, true); + jjtc003 = false; +jjtn003.value = builder.toString(); + } finally { +if (jjtc003) { + jjtree.closeNodeScope(jjtn003, true); + } + } + break; + } + case SQUOTE:{ + jj_consume_token(SQUOTE); + label_2: + while (true) { + switch ((jj_ntk==-1)?jj_ntk_f():jj_ntk) { + case SCHAR:{ + ; + break; + } + default: + jj_la1[9] = jj_gen; + break label_2; + } + t = jj_consume_token(SCHAR); +builder.append(t.image); + } + jj_consume_token(SENDQUOTE); +ASTSTRING jjtn004 = new ASTSTRING(JJTSTRING); + boolean jjtc004 = true; + jjtree.openNodeScope(jjtn004); + try { +jjtree.closeNodeScope(jjtn004, true); + jjtc004 = false; +jjtn004.value = builder.toString(); + } finally { +if (jjtc004) { + jjtree.closeNodeScope(jjtn004, true); + } + } + break; + } + case VARIABLE:{ + t = jj_consume_token(VARIABLE); +ASTVAR jjtn005 = new ASTVAR(JJTVAR); + boolean jjtc005 = true; + jjtree.openNodeScope(jjtn005); + try { +jjtree.closeNodeScope(jjtn005, true); + jjtc005 = false; +jjtn005.value = new Variable(t.image); + } finally { +if (jjtc005) { + jjtree.closeNodeScope(jjtn005, true); + } + } + break; + } + default: + jj_la1[10] = jj_gen; + jj_consume_token(-1); + throw new ParseException(); + } + } + + /** Generated Token Manager. */ + public PartitionParserTokenManager token_source; + SimpleCharStream jj_input_stream; + /** Current token. */ + public Token token; + /** Next token. */ + public Token jj_nt; + private int jj_ntk; + private int jj_gen; + final private int[] jj_la1 = new int[11]; + static private int[] jj_la1_0; + static { + jj_la1_init_0(); + } + private static void jj_la1_init_0() { + jj_la1_0 = new int[] {0x60000,0x60000,0x60000,0x60000,0x60000,0x60000,0x3482a00,0x3001f8,0x8000000,0x20000000,0x3402800,}; + } + + /** Constructor with InputStream. */ + public PartitionParser(java.io.InputStream stream) { + this(stream, null); + } + /** Constructor with InputStream and supplied encoding */ + public PartitionParser(java.io.InputStream stream, String encoding) { + try { jj_input_stream = new SimpleCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } + token_source = new PartitionParserTokenManager(jj_input_stream); + token = new Token(); + jj_ntk = -1; + jj_gen = 0; + for (int i = 0; i < 11; i++) jj_la1[i] = -1; + } + + /** Reinitialise. */ + public void ReInit(java.io.InputStream stream) { + ReInit(stream, null); + } + /** Reinitialise. */ + public void ReInit(java.io.InputStream stream, String encoding) { + try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); } + token_source.ReInit(jj_input_stream); + token = new Token(); + jj_ntk = -1; + jjtree.reset(); + jj_gen = 0; + for (int i = 0; i < 11; i++) jj_la1[i] = -1; + } + + /** Constructor. */ + public PartitionParser(java.io.Reader stream) { + jj_input_stream = new SimpleCharStream(stream, 1, 1); + token_source = new PartitionParserTokenManager(jj_input_stream); + token = new Token(); + jj_ntk = -1; + jj_gen = 0; + for (int i = 0; i < 11; i++) jj_la1[i] = -1; + } + + /** Reinitialise. 
 */
+  public void ReInit(java.io.Reader stream) {
+    if (jj_input_stream == null) {
+      jj_input_stream = new SimpleCharStream(stream, 1, 1);
+    } else {
+      jj_input_stream.ReInit(stream, 1, 1);
+    }
+    if (token_source == null) {
+      token_source = new PartitionParserTokenManager(jj_input_stream);
+    }
+
+    token_source.ReInit(jj_input_stream);
+    token = new Token();
+    jj_ntk = -1;
+    jjtree.reset();
+    jj_gen = 0;
+    for (int i = 0; i < 11; i++) jj_la1[i] = -1;
+  }
+
+  /** Constructor with generated Token Manager. */
+  public PartitionParser(PartitionParserTokenManager tm) {
+    token_source = tm;
+    token = new Token();
+    jj_ntk = -1;
+    jj_gen = 0;
+    for (int i = 0; i < 11; i++) jj_la1[i] = -1;
+  }
+
+  /** Reinitialise. */
+  public void ReInit(PartitionParserTokenManager tm) {
+    token_source = tm;
+    token = new Token();
+    jj_ntk = -1;
+    jjtree.reset();
+    jj_gen = 0;
+    for (int i = 0; i < 11; i++) jj_la1[i] = -1;
+  }
+
+  private Token jj_consume_token(int kind) throws ParseException {
+    Token oldToken;
+    if ((oldToken = token).next != null) token = token.next;
+    else token = token.next = token_source.getNextToken();
+    jj_ntk = -1;
+    if (token.kind == kind) {
+      jj_gen++;
+      return token;
+    }
+    token = oldToken;
+    jj_kind = kind;
+    throw generateParseException();
+  }
+
+
+/** Get the next Token. */
+  final public Token getNextToken() {
+    if (token.next != null) token = token.next;
+    else token = token.next = token_source.getNextToken();
+    jj_ntk = -1;
+    jj_gen++;
+    return token;
+  }
+
+/** Get the specific Token. */
+  final public Token getToken(int index) {
+    Token t = token;
+    for (int i = 0; i < index; i++) {
+      if (t.next != null) t = t.next;
+      else t = t.next = token_source.getNextToken();
+    }
+    return t;
+  }
+
+  private int jj_ntk_f() {
+    if ((jj_nt=token.next) == null)
+      return (jj_ntk = (token.next=token_source.getNextToken()).kind);
+    else
+      return (jj_ntk = jj_nt.kind);
+  }
+
+  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
+  private int[] jj_expentry;
+  private int jj_kind = -1;
+
+  /** Generate ParseException. */
+  public ParseException generateParseException() {
+    jj_expentries.clear();
+    boolean[] la1tokens = new boolean[30];
+    if (jj_kind >= 0) {
+      la1tokens[jj_kind] = true;
+      jj_kind = -1;
+    }
+    for (int i = 0; i < 11; i++) {
+      if (jj_la1[i] == jj_gen) {
+        for (int j = 0; j < 32; j++) {
+          if ((jj_la1_0[i] & (1<<j)) != 0L) {
+            la1tokens[j] = true;
+          }
+        }
+      }
+    }
+    for (int i = 0; i < 30; i++) {
+      if (la1tokens[i]) {
+        jj_expentry = new int[1];
+        jj_expentry[0] = i;
+        jj_expentries.add(jj_expentry);
+      }
+    }
+    int[][] exptokseq = new int[jj_expentries.size()][];
+    for (int i = 0; i < jj_expentries.size(); i++) {
+      exptokseq[i] = jj_expentries.get(i);
+    }
+    return new ParseException(token, exptokseq, tokenImage);
+  }
+
+  /** Enable tracing. */
+  final public void enable_tracing() {
+  }
+
+  /** Disable tracing. */
+  final public void disable_tracing() {
+  }
+
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.jj b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.jj
new file mode 100644
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.jj
+SKIP :
+{
+  " " | "\t"
+}
+TOKEN :
+{
+  < EQUAL: "==" | "=">
+| < NEQUAL: "!=" | "<>">
+| < GTE: ">=" >
+| < GT: ">" >
+| < LTE: "<=" >
+| < LT: "<" >
+| < LPAREN : "(">
+| < RPAREN : ")">
+| < INT: (<DIGIT>)+ >
+| < #DIGIT: ["0" - "9"] >
+| < FLOAT: <INT> <EXPONENT> | <MANTISSA> (<EXPONENT>)? >
+| < #EXPONENT: ["e", "E"] ("+"|"-")? <DIGITS> >
+| < #MANTISSA: "." <DIGITS> (<DIGIT>)* | <DIGITS> (<DIGIT>)* "." >
+| < #DIGITS : (["0"-"9"])+ >
+| < AND: "and" | "&&">
+| < OR: "or" | "||">
+| < NOT: "not">
+| < MATCHES: "matches">
+| < LIKE: "like">
+| < VARIABLE : "_" < CHARS > | ["A"-"Z"] < CHARS > | ["a"-"z"] <CHARS> >
+| < #CHARS : (["a"-"z","0"-"9","_","A"-"Z"])+ >
+| < QUOTE:"\"" > : STRING_STATE
+| < SQUOTE:"\'" > : SSTRING_STATE
+}
+<STRING_STATE> TOKEN:
+{
+  < ENDQUOTE : <QUOTE> > : DEFAULT
+| < CHAR : ~["\""] >
+}
+<SSTRING_STATE> TOKEN:
+{
+  < SENDQUOTE : <SQUOTE> > : DEFAULT
+| < SCHAR : ~["\'"] >
+}
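To make the lexer above concrete, here is an illustrative driver (not part of this change; dateint and app are hypothetical partition keys). The input tokenizes as VARIABLE GTE INT AND VARIABLE EQUAL QUOTE CHAR* ENDQUOTE, and filter() returns the root of the resulting tree:

    package com.netflix.metacat.common.partition.parser;

    import java.io.StringReader;

    class FilterParseDemo {
        public static void main(String[] args) throws ParseException {
            String filter = "dateint >= 20170101 and app == \"metacat\"";
            PartitionParser parser = new PartitionParser(new StringReader(filter));
            SimpleNode root = parser.filter();  // FILTER node wrapping the expression
            root.dump("");                      // prints one node name per line
        }
    }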
+SimpleNode filter() :
+{/*@bgen(jjtree) FILTER */
+  ASTFILTER jjtn000 = new ASTFILTER(JJTFILTER);
+  boolean jjtc000 = true;
+  jjtree.openNodeScope(jjtn000);
+/*@egen*/ }{/*@bgen(jjtree) FILTER */
+  try {
+/*@egen*/
+  expr()/*@bgen(jjtree)*/
+  {
+    jjtree.closeNodeScope(jjtn000, true);
+    jjtc000 = false;
+  }
+/*@egen*/ { return jjtn000; }/*@bgen(jjtree)*/
+  } catch (Throwable jjte000) {
+    if (jjtc000) {
+      jjtree.clearNodeScope(jjtn000);
+      jjtc000 = false;
+    } else {
+      jjtree.popNode();
+    }
+    if (jjte000 instanceof RuntimeException) {
+      throw (RuntimeException)jjte000;
+    }
+    if (jjte000 instanceof ParseException) {
+      throw (ParseException)jjte000;
+    }
+    throw (Error)jjte000;
+  } finally {
+    if (jjtc000) {
+      jjtree.closeNodeScope(jjtn000, true);
+    }
+  }
+/*@egen*/
+}
+void expr():
+{ }
+{
+  < LPAREN > expr() < RPAREN > (< AND >/*@bgen(jjtree) #AND( 2) */
+  {
+    ASTAND jjtn001 = new ASTAND(JJTAND);
+    boolean jjtc001 = true;
+    jjtree.openNodeScope(jjtn001);
+  }
+  try {
+/*@egen*/ expr()/*@bgen(jjtree)*/
+  } catch (Throwable jjte001) {
+    if (jjtc001) {
+      jjtree.clearNodeScope(jjtn001);
+      jjtc001 = false;
+    } else {
+      jjtree.popNode();
+    }
+    if (jjte001 instanceof RuntimeException) {
+      throw (RuntimeException)jjte001;
+    }
+    if (jjte001 instanceof ParseException) {
+      throw (ParseException)jjte001;
+    }
+    throw (Error)jjte001;
+  } finally {
+    if (jjtc001) {
+      jjtree.closeNodeScope(jjtn001, 2);
+    }
+  }
+/*@egen*/ | < OR >/*@bgen(jjtree) #OR( 2) */
+  {
+    ASTOR jjtn002 = new ASTOR(JJTOR);
+    boolean jjtc002 = true;
+    jjtree.openNodeScope(jjtn002);
+  }
+  try {
+/*@egen*/ expr()/*@bgen(jjtree)*/
+  } catch (Throwable jjte002) {
+    if (jjtc002) {
+      jjtree.clearNodeScope(jjtn002);
+      jjtc002 = false;
+    } else {
+      jjtree.popNode();
+    }
+    if (jjte002 instanceof RuntimeException) {
+      throw (RuntimeException)jjte002;
+    }
+    if (jjte002 instanceof ParseException) {
+      throw (ParseException)jjte002;
+    }
+    throw (Error)jjte002;
+  } finally {
+    if (jjtc002) {
+      jjtree.closeNodeScope(jjtn002, 2);
+    }
+  }
+/*@egen*/ )?
+| neval() (< AND >/*@bgen(jjtree) #AND( 2) */ + { + ASTAND jjtn003 = new ASTAND(JJTAND); + boolean jjtc003 = true; + jjtree.openNodeScope(jjtn003); + } + try { +/*@egen*/ expr()/*@bgen(jjtree)*/ + } catch (Throwable jjte003) { + if (jjtc003) { + jjtree.clearNodeScope(jjtn003); + jjtc003 = false; + } else { + jjtree.popNode(); + } + if (jjte003 instanceof RuntimeException) { + throw (RuntimeException)jjte003; + } + if (jjte003 instanceof ParseException) { + throw (ParseException)jjte003; + } + throw (Error)jjte003; + } finally { + if (jjtc003) { + jjtree.closeNodeScope(jjtn003, 2); + } + } +/*@egen*/ | < OR >/*@bgen(jjtree) #OR( 2) */ + { + ASTOR jjtn004 = new ASTOR(JJTOR); + boolean jjtc004 = true; + jjtree.openNodeScope(jjtn004); + } + try { +/*@egen*/ expr()/*@bgen(jjtree)*/ + } catch (Throwable jjte004) { + if (jjtc004) { + jjtree.clearNodeScope(jjtn004); + jjtc004 = false; + } else { + jjtree.popNode(); + } + if (jjte004 instanceof RuntimeException) { + throw (RuntimeException)jjte004; + } + if (jjte004 instanceof ParseException) { + throw (ParseException)jjte004; + } + throw (Error)jjte004; + } finally { + if (jjtc004) { + jjtree.closeNodeScope(jjtn004, 2); + } + } +/*@egen*/ )? +| eval() (< AND >/*@bgen(jjtree) #AND( 2) */ + { + ASTAND jjtn005 = new ASTAND(JJTAND); + boolean jjtc005 = true; + jjtree.openNodeScope(jjtn005); + } + try { +/*@egen*/ expr()/*@bgen(jjtree)*/ + } catch (Throwable jjte005) { + if (jjtc005) { + jjtree.clearNodeScope(jjtn005); + jjtc005 = false; + } else { + jjtree.popNode(); + } + if (jjte005 instanceof RuntimeException) { + throw (RuntimeException)jjte005; + } + if (jjte005 instanceof ParseException) { + throw (ParseException)jjte005; + } + throw (Error)jjte005; + } finally { + if (jjtc005) { + jjtree.closeNodeScope(jjtn005, 2); + } + } +/*@egen*/ | < OR >/*@bgen(jjtree) #OR( 2) */ + { + ASTOR jjtn006 = new ASTOR(JJTOR); + boolean jjtc006 = true; + jjtree.openNodeScope(jjtn006); + } + try { +/*@egen*/ expr()/*@bgen(jjtree)*/ + } catch (Throwable jjte006) { + if (jjtc006) { + jjtree.clearNodeScope(jjtn006); + jjtc006 = false; + } else { + jjtree.popNode(); + } + if (jjte006 instanceof RuntimeException) { + throw (RuntimeException)jjte006; + } + if (jjte006 instanceof ParseException) { + throw (ParseException)jjte006; + } + throw (Error)jjte006; + } finally { + if (jjtc006) { + jjtree.closeNodeScope(jjtn006, 2); + } + } +/*@egen*/ )? 
+} +void neval() : +{/*@bgen(jjtree) NEVAL */ + ASTNEVAL jjtn000 = new ASTNEVAL(JJTNEVAL); + boolean jjtc000 = true; + jjtree.openNodeScope(jjtn000); +/*@egen*/ } +{/*@bgen(jjtree) NEVAL */ + try { +/*@egen*/ + < NOT > eval()/*@bgen(jjtree)*/ + } catch (Throwable jjte000) { + if (jjtc000) { + jjtree.clearNodeScope(jjtn000); + jjtc000 = false; + } else { + jjtree.popNode(); + } + if (jjte000 instanceof RuntimeException) { + throw (RuntimeException)jjte000; + } + if (jjte000 instanceof ParseException) { + throw (ParseException)jjte000; + } + throw (Error)jjte000; + } finally { + if (jjtc000) { + jjtree.closeNodeScope(jjtn000, true); + } + } +/*@egen*/ +} +void eval() : +{/*@bgen(jjtree) EVAL */ + ASTEVAL jjtn000 = new ASTEVAL(JJTEVAL); + boolean jjtc000 = true; + jjtree.openNodeScope(jjtn000); +/*@egen*/ } +{/*@bgen(jjtree) EVAL */ + try { +/*@egen*/ + term() comp() term()/*@bgen(jjtree)*/ + } catch (Throwable jjte000) { + if (jjtc000) { + jjtree.clearNodeScope(jjtn000); + jjtc000 = false; + } else { + jjtree.popNode(); + } + if (jjte000 instanceof RuntimeException) { + throw (RuntimeException)jjte000; + } + if (jjte000 instanceof ParseException) { + throw (ParseException)jjte000; + } + throw (Error)jjte000; + } finally { + if (jjtc000) { + jjtree.closeNodeScope(jjtn000, true); + } + } +/*@egen*/ +} +void comp(): +{ } +{/*@bgen(jjtree) GT */ + { + ASTGT jjtn001 = new ASTGT(JJTGT); + boolean jjtc001 = true; + jjtree.openNodeScope(jjtn001); + } + try { +/*@egen*/ < GT >/*@bgen(jjtree)*/ + } finally { + if (jjtc001) { + jjtree.closeNodeScope(jjtn001, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) LT */ + { + ASTLT jjtn002 = new ASTLT(JJTLT); + boolean jjtc002 = true; + jjtree.openNodeScope(jjtn002); + } + try { +/*@egen*/ < LT >/*@bgen(jjtree)*/ + } finally { + if (jjtc002) { + jjtree.closeNodeScope(jjtn002, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) LTE */ + { + ASTLTE jjtn003 = new ASTLTE(JJTLTE); + boolean jjtc003 = true; + jjtree.openNodeScope(jjtn003); + } + try { +/*@egen*/ < LTE >/*@bgen(jjtree)*/ + } finally { + if (jjtc003) { + jjtree.closeNodeScope(jjtn003, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) GTE */ + { + ASTGTE jjtn004 = new ASTGTE(JJTGTE); + boolean jjtc004 = true; + jjtree.openNodeScope(jjtn004); + } + try { +/*@egen*/ < GTE >/*@bgen(jjtree)*/ + } finally { + if (jjtc004) { + jjtree.closeNodeScope(jjtn004, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) EQ */ + { + ASTEQ jjtn005 = new ASTEQ(JJTEQ); + boolean jjtc005 = true; + jjtree.openNodeScope(jjtn005); + } + try { +/*@egen*/ < EQUAL >/*@bgen(jjtree)*/ + } finally { + if (jjtc005) { + jjtree.closeNodeScope(jjtn005, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) NEQ */ + { + ASTNEQ jjtn006 = new ASTNEQ(JJTNEQ); + boolean jjtc006 = true; + jjtree.openNodeScope(jjtn006); + } + try { +/*@egen*/ < NEQUAL >/*@bgen(jjtree)*/ + } finally { + if (jjtc006) { + jjtree.closeNodeScope(jjtn006, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) MATCHES */ + { + ASTMATCHES jjtn007 = new ASTMATCHES(JJTMATCHES); + boolean jjtc007 = true; + jjtree.openNodeScope(jjtn007); + } + try { +/*@egen*/ < MATCHES >/*@bgen(jjtree)*/ + } finally { + if (jjtc007) { + jjtree.closeNodeScope(jjtn007, true); + } + } +/*@egen*/ +|/*@bgen(jjtree) LIKE */ + { + ASTLIKE jjtn008 = new ASTLIKE(JJTLIKE); + boolean jjtc008 = true; + jjtree.openNodeScope(jjtn008); + } + try { +/*@egen*/ < LIKE >/*@bgen(jjtree)*/ + } finally { + if (jjtc008) { + jjtree.closeNodeScope(jjtn008, true); + } + } +/*@egen*/ +} +void term(): +{ + Token t; + StringBuilder builder = new StringBuilder(); +} 
+{
+  ( t = < INT >/*@bgen(jjtree) NUM */
+  {
+    ASTNUM jjtn001 = new ASTNUM(JJTNUM);
+    boolean jjtc001 = true;
+    jjtree.openNodeScope(jjtn001);
+  }
+  try {
+/*@egen*//*@bgen(jjtree)*/
+  {
+    jjtree.closeNodeScope(jjtn001, true);
+    jjtc001 = false;
+  }
+/*@egen*/ { jjtn001.value = new java.math.BigDecimal(t.image); }/*@bgen(jjtree)*/
+  } finally {
+    if (jjtc001) {
+      jjtree.closeNodeScope(jjtn001, true);
+    }
+  }
+/*@egen*/ )
+  | ( t = < FLOAT >/*@bgen(jjtree) NUM */
+  {
+    ASTNUM jjtn002 = new ASTNUM(JJTNUM);
+    boolean jjtc002 = true;
+    jjtree.openNodeScope(jjtn002);
+  }
+  try {
+/*@egen*//*@bgen(jjtree)*/
+  {
+    jjtree.closeNodeScope(jjtn002, true);
+    jjtc002 = false;
+  }
+/*@egen*/ { jjtn002.value = new java.math.BigDecimal(t.image); }/*@bgen(jjtree)*/
+  } finally {
+    if (jjtc002) {
+      jjtree.closeNodeScope(jjtn002, true);
+    }
+  }
+/*@egen*/ )
+  | ( < QUOTE > (t = < CHAR > { builder.append(t.image); })* < ENDQUOTE >/*@bgen(jjtree) STRING */
+  {
+    ASTSTRING jjtn003 = new ASTSTRING(JJTSTRING);
+    boolean jjtc003 = true;
+    jjtree.openNodeScope(jjtn003);
+  }
+  try {
+/*@egen*//*@bgen(jjtree)*/
+  {
+    jjtree.closeNodeScope(jjtn003, true);
+    jjtc003 = false;
+  }
+/*@egen*/ { jjtn003.value = builder.toString(); }/*@bgen(jjtree)*/
+  } finally {
+    if (jjtc003) {
+      jjtree.closeNodeScope(jjtn003, true);
+    }
+  }
+/*@egen*/ )
+  | ( < SQUOTE > (t = < SCHAR > { builder.append(t.image); })* < SENDQUOTE >/*@bgen(jjtree) STRING */
+  {
+    ASTSTRING jjtn004 = new ASTSTRING(JJTSTRING);
+    boolean jjtc004 = true;
+    jjtree.openNodeScope(jjtn004);
+  }
+  try {
+/*@egen*//*@bgen(jjtree)*/
+  {
+    jjtree.closeNodeScope(jjtn004, true);
+    jjtc004 = false;
+  }
+/*@egen*/ { jjtn004.value = builder.toString(); }/*@bgen(jjtree)*/
+  } finally {
+    if (jjtc004) {
+      jjtree.closeNodeScope(jjtn004, true);
+    }
+  }
+/*@egen*/ )
+  | ( t = < VARIABLE >/*@bgen(jjtree) VAR */
+  {
+    ASTVAR jjtn005 = new ASTVAR(JJTVAR);
+    boolean jjtc005 = true;
+    jjtree.openNodeScope(jjtn005);
+  }
+  try {
+/*@egen*//*@bgen(jjtree)*/
+  {
+    jjtree.closeNodeScope(jjtn005, true);
+    jjtc005 = false;
+  }
+/*@egen*/ { jjtn005.value = new Variable(t.image); }/*@bgen(jjtree)*/
+  } finally {
+    if (jjtc005) {
+      jjtree.closeNodeScope(jjtn005, true);
+    }
+  }
+/*@egen*/ )
+}
\ No newline at end of file
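A note on provenance before the grammar source below: the .jjt file is the hand-written input, and jjtree expands its #NODE annotations into the scope bookkeeping seen in the .jj above. An illustrative sketch of the tree shape this produces (the expected output is approximate and assumes the stock jjtree SimpleNode.dump, which indents one space per level):

    package com.netflix.metacat.common.partition.parser;

    import java.io.StringReader;

    class AstShapeDemo {
        public static void main(String[] args) throws ParseException {
            // "not" binds to the first comparison; "and" then joins it with the second.
            SimpleNode root = new PartitionParser(
                new StringReader("not app == \"prod\" and dateint == 20170101")).filter();
            root.dump("");
            // Expected shape, roughly:
            // FILTER
            //  AND
            //   NEVAL
            //    EVAL
            //     VAR
            //     EQ
            //     STRING
            //   EVAL
            //    VAR
            //    EQ
            //    NUM
        }
    }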
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.jjt b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.jjt
new file mode 100644
index 000000000..23136c138
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParser.jjt
@@ -0,0 +1,103 @@
+options {
+  NODE_DEFAULT_VOID = true;
+  MULTI = true;
+  STATIC = false;
+  VISITOR = true;
+}
+
+PARSER_BEGIN(PartitionParser)
+package com.netflix.metacat.common.partition.parser;
+public class PartitionParser {
+  public static void main (String args []) throws ParseException {
+    PartitionParser parser = new PartitionParser(new java.io.StringReader(args[0]));
+    SimpleNode root = parser.filter();
+    root.dump("");
+    System.out.println(root.jjtAccept(new com.netflix.metacat.common.partition.visitor.PartitionParserEval(), null));
+  }
+}
+PARSER_END(PartitionParser)
+
+SKIP :
+{
+  " " | "\t"
+}
+TOKEN :
+{
+  < EQUAL: "==" | "=">
+| < NEQUAL: "!=" | "<>">
+| < GTE: ">=" >
+| < GT: ">" >
+| < LTE: "<=" >
+| < LT: "<" >
+| < LPAREN : "(">
+| < RPAREN : ")">
+| < INT: (<DIGIT>)+ >
+| < #DIGIT: ["0" - "9"] >
+| < FLOAT: <INT> <EXPONENT> | <MANTISSA> (<EXPONENT>)? >
+| < #EXPONENT: ["e", "E"] ("+"|"-")? <DIGITS> >
+| < #MANTISSA: "." <DIGITS> (<DIGIT>)* | <DIGITS> (<DIGIT>)* "." >
+| < #DIGITS : (["0"-"9"])+ >
+| < AND: "and" | "&&">
+| < OR: "or" | "||">
+| < NOT: "not">
+| < MATCHES: "matches">
+| < LIKE: "like">
+| < VARIABLE : "_" < CHARS > | ["A"-"Z"] < CHARS > | ["a"-"z"] <CHARS> >
+| < #CHARS : (["a"-"z","0"-"9","_","A"-"Z"])+ >
+| < QUOTE:"\"" > : STRING_STATE
+| < SQUOTE:"\'" > : SSTRING_STATE
+}
+<STRING_STATE> TOKEN:
+{
+  < ENDQUOTE : <QUOTE> > : DEFAULT
+| < CHAR : ~["\""] >
+}
+<SSTRING_STATE> TOKEN:
+{
+  < SENDQUOTE : <SQUOTE> > : DEFAULT
+| < SCHAR : ~["\'"] >
+}
+SimpleNode filter() #FILTER:
+{ }{
+  expr() { return jjtThis; }
+}
+void expr():
+{ }
+{
+  < LPAREN > expr() < RPAREN > (< AND > expr() #AND(2) | < OR > expr() #OR(2))?
+| neval() (< AND > expr() #AND(2) | < OR > expr() #OR(2))?
+| eval() (< AND > expr() #AND(2) | < OR > expr() #OR(2))?
+}
+void neval() #NEVAL:
+{ }
+{
+  < NOT > eval()
+}
+void eval() #EVAL:
+{ }
+{
+  term() comp() term()
+}
+void comp():
+{ }
+{ < GT > #GT
+| < LT > #LT
+| < LTE > #LTE
+| < GTE > #GTE
+| < EQUAL > #EQ
+| < NEQUAL > #NEQ
+| < MATCHES > #MATCHES
+| < LIKE > #LIKE
+}
+void term():
+{
+  Token t;
+  StringBuilder builder = new StringBuilder();
+}
+{
+  ( t = < INT > { jjtThis.value = new java.math.BigDecimal(t.image); } #NUM )
+  | ( t = < FLOAT > { jjtThis.value = new java.math.BigDecimal(t.image); } #NUM)
+  | ( < QUOTE > (t = < CHAR > { builder.append(t.image); })* < ENDQUOTE > { jjtThis.value = builder.toString(); } #STRING)
+  | ( < SQUOTE > (t = < SCHAR > { builder.append(t.image); })* < SENDQUOTE > { jjtThis.value = builder.toString(); } #STRING)
+  | ( t = < VARIABLE > { jjtThis.value = new Variable(t.image); } #VAR)
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserConstants.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserConstants.java
new file mode 100644
index 000000000..e86fa5647
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserConstants.java
@@ -0,0 +1,109 @@
+/* Generated By:JJTree&JavaCC: Do not edit this line. PartitionParserConstants.java */
+package com.netflix.metacat.common.partition.parser;
+
+
+/**
+ * Token literal values and constants.
+ * Generated by org.javacc.parser.OtherFilesGen#start()
+ */
+public interface PartitionParserConstants {
+
+  /** End of File. */
+  int EOF = 0;
+  /** RegularExpression Id. */
+  int EQUAL = 3;
+  /** RegularExpression Id. */
+  int NEQUAL = 4;
+  /** RegularExpression Id. */
+  int GTE = 5;
+  /** RegularExpression Id. */
+  int GT = 6;
+  /** RegularExpression Id. */
+  int LTE = 7;
+  /** RegularExpression Id. */
+  int LT = 8;
+  /** RegularExpression Id. */
+  int LPAREN = 9;
+  /** RegularExpression Id. */
+  int RPAREN = 10;
+  /** RegularExpression Id. */
+  int INT = 11;
+  /** RegularExpression Id. */
+  int DIGIT = 12;
+  /** RegularExpression Id. */
+  int FLOAT = 13;
+  /** RegularExpression Id. */
+  int EXPONENT = 14;
+  /** RegularExpression Id. */
+  int MANTISSA = 15;
+  /** RegularExpression Id. */
+  int DIGITS = 16;
+  /** RegularExpression Id. */
+  int AND = 17;
+  /** RegularExpression Id. */
+  int OR = 18;
+  /** RegularExpression Id. */
+  int NOT = 19;
+  /** RegularExpression Id. */
+  int MATCHES = 20;
+  /** RegularExpression Id. */
+  int LIKE = 21;
+  /** RegularExpression Id. */
+  int VARIABLE = 22;
+  /** RegularExpression Id. */
+  int CHARS = 23;
+  /** RegularExpression Id. */
+  int QUOTE = 24;
+  /** RegularExpression Id. */
+  int SQUOTE = 25;
+  /** RegularExpression Id. */
+  int ENDQUOTE = 26;
+  /** RegularExpression Id. */
+  int CHAR = 27;
+  /** RegularExpression Id. */
+  int SENDQUOTE = 28;
+  /** RegularExpression Id. */
+  int SCHAR = 29;
+
+  /** Lexical state. */
+  int DEFAULT = 0;
+  /** Lexical state. */
+  int STRING_STATE = 1;
+  /** Lexical state. */
+  int SSTRING_STATE = 2;
+
+  /** Literal token values. */
+  String[] tokenImage = {
+    "<EOF>",
+    "\" \"",
+    "\"\\t\"",
+    "<EQUAL>",
+    "<NEQUAL>",
+    "\">=\"",
+    "\">\"",
+    "\"<=\"",
+    "\"<\"",
+    "\"(\"",
+    "\")\"",
+    "<INT>",
+    "<DIGIT>",
+    "<FLOAT>",
+    "<EXPONENT>",
+    "<MANTISSA>",
+    "<DIGITS>",
+    "<AND>",
+    "<OR>",
+    "\"not\"",
+    "\"matches\"",
+    "\"like\"",
+    "<VARIABLE>",
+    "<CHARS>",
+    "\"\\\"\"",
+    "\"\\\'\"",
+    "<ENDQUOTE>",
+    "<CHAR>",
+    "<SENDQUOTE>",
+    "<SCHAR>",
+  };
+
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserDefaultVisitor.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserDefaultVisitor.java
new file mode 100644
index 000000000..9fee1a7fe
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserDefaultVisitor.java
@@ -0,0 +1,61 @@
+/* Generated By:JavaCC: Do not edit this line. PartitionParserDefaultVisitor.java Version 6.1_2 */
+package com.netflix.metacat.common.partition.parser;
+
+public class PartitionParserDefaultVisitor implements PartitionParserVisitor{
+  public Object defaultVisit(SimpleNode node, Object data){
+    node.childrenAccept(this, data);
+    return data;
+  }
+  public Object visit(SimpleNode node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTFILTER node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTAND node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTOR node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTNEVAL node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTEVAL node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTGT node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTLT node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTLTE node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTGTE node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTEQ node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTNEQ node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTMATCHES node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTLIKE node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTNUM node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTSTRING node, Object data){
+    return defaultVisit(node, data);
+  }
+  public Object visit(ASTVAR node, Object data){
+    return defaultVisit(node, data);
+  }
+}
+/* JavaCC - OriginalChecksum=449d8c0abd893cbe090860f00a25fca3 (do not edit this line) */
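The default visitor above makes ad-hoc traversals cheap. A hypothetical example (not part of this change) that collects the names of every variable a filter mentions; it is placed in the parser package so it can read ASTVAR.value, which term() sets to a Variable:

    package com.netflix.metacat.common.partition.parser;

    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical helper: records every variable name referenced by a filter.
    class VariableCollector extends PartitionParserDefaultVisitor {
        final List<String> names = new ArrayList<String>();

        @Override
        public Object visit(ASTVAR node, Object data) {
            names.add(String.valueOf(node.value));  // value is the Variable set in term()
            return defaultVisit(node, data);
        }

        public static void main(String[] args) throws ParseException {
            PartitionParser parser =
                new PartitionParser(new StringReader("dateint >= 20170101 and batchid == 5"));
            VariableCollector collector = new VariableCollector();
            parser.filter().jjtAccept(collector, null);
            // Prints both variable names, assuming Variable.toString() yields the name.
            System.out.println(collector.names);
        }
    }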
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserTokenManager.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserTokenManager.java
new file mode 100644
index 000000000..014e34777
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserTokenManager.java
@@ -0,0 +1,923 @@
+/* PartitionParserTokenManager.java */
+/* Generated By:JJTree&JavaCC: Do not edit this line. PartitionParserTokenManager.java */
+package com.netflix.metacat.common.partition.parser;
+
+/** Token Manager. */
+@SuppressWarnings("unused")public class PartitionParserTokenManager implements PartitionParserConstants {
+
+  /** Debug output. */
+  public java.io.PrintStream debugStream = System.out;
+  /** Set debug output. */
+  public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }
+private final int jjStopStringLiteralDfa_0(int pos, long active0){
+   switch (pos)
+   {
+      case 0:
+         if ((active0 & 0x380000L) != 0L)
+            return 26;
+         if ((active0 & 0x180L) != 0L)
+            return 5;
+         return -1;
+      case 1:
+         if ((active0 & 0x380000L) != 0L)
+         {
+            jjmatchedKind = 22;
+            jjmatchedPos = 1;
+            return 26;
+         }
+         return -1;
+      case 2:
+         if ((active0 & 0x80000L) != 0L)
+            return 26;
+         if ((active0 & 0x300000L) != 0L)
+         {
+            jjmatchedKind = 22;
+            jjmatchedPos = 2;
+            return 26;
+         }
+         return -1;
+      case 3:
+         if ((active0 & 0x200000L) != 0L)
+            return 26;
+         if ((active0 & 0x100000L) != 0L)
+         {
+            jjmatchedKind = 22;
+            jjmatchedPos = 3;
+            return 26;
+         }
+         return -1;
+      case 4:
+         if ((active0 & 0x100000L) != 0L)
+         {
+            jjmatchedKind = 22;
+            jjmatchedPos = 4;
+            return 26;
+         }
+         return -1;
+      case 5:
+         if ((active0 & 0x100000L) != 0L)
+         {
+            jjmatchedKind = 22;
+            jjmatchedPos = 5;
+            return 26;
+         }
+         return -1;
+      default :
+         return -1;
+   }
+}
+private final int jjStartNfa_0(int pos, long active0){
+   return jjMoveNfa_0(jjStopStringLiteralDfa_0(pos, active0), pos + 1);
+}
+private int jjStopAtPos(int pos, int kind)
+{
+   jjmatchedKind = kind;
+   jjmatchedPos = pos;
+   return pos + 1;
+}
+private int jjMoveStringLiteralDfa0_0(){
+   switch(curChar)
+   {
+      case 34:
+         return jjStopAtPos(0, 24);
+      case 39:
+         return jjStopAtPos(0, 25);
+      case 40:
+         return jjStopAtPos(0, 9);
+      case 41:
+         return jjStopAtPos(0, 10);
+      case 60:
+         jjmatchedKind = 8;
+         return jjMoveStringLiteralDfa1_0(0x80L);
+      case 62:
+         jjmatchedKind = 6;
+         return jjMoveStringLiteralDfa1_0(0x20L);
+      case 108:
+         return jjMoveStringLiteralDfa1_0(0x200000L);
+      case 109:
+         return jjMoveStringLiteralDfa1_0(0x100000L);
+      case 110:
+         return jjMoveStringLiteralDfa1_0(0x80000L);
+      default :
+         return jjMoveNfa_0(1, 0);
+   }
+}
+private int jjMoveStringLiteralDfa1_0(long active0){
+   try { curChar = input_stream.readChar(); }
+   catch(java.io.IOException e) {
+      jjStopStringLiteralDfa_0(0, active0);
+      return 1;
+   }
+   switch(curChar)
+   {
+      case 61:
+         if ((active0 & 0x20L) != 0L)
+            return jjStopAtPos(1, 5);
+         else if ((active0 & 0x80L) != 0L)
+            return jjStopAtPos(1, 7);
+         break;
+      case 97:
+         return jjMoveStringLiteralDfa2_0(active0, 0x100000L);
+      case 105:
+         return jjMoveStringLiteralDfa2_0(active0, 0x200000L);
+      case 111:
+         return jjMoveStringLiteralDfa2_0(active0, 0x80000L);
+      default :
+         break;
+   }
+   return jjStartNfa_0(0, active0);
+}
+private int jjMoveStringLiteralDfa2_0(long old0, long active0){
+   if (((active0 &= old0)) == 0L)
+      return jjStartNfa_0(0, old0);
+   try { curChar = input_stream.readChar(); }
+   catch(java.io.IOException e) {
+      jjStopStringLiteralDfa_0(1, active0);
+      return 2;
+   }
+   switch(curChar)
+   {
+      case 107:
+         return jjMoveStringLiteralDfa3_0(active0, 0x200000L);
+      case 116:
+         if ((active0 & 0x80000L) != 0L)
+            return jjStartNfaWithStates_0(2, 19, 26);
+         return jjMoveStringLiteralDfa3_0(active0, 0x100000L);
+      default :
+         break;
+   }
+   return jjStartNfa_0(1, active0);
+}
+private int jjMoveStringLiteralDfa3_0(long old0, long active0){
+   if (((active0 &= old0)) == 0L)
+      return jjStartNfa_0(1, old0);
+   try { curChar = input_stream.readChar(); }
+
catch(java.io.IOException e) { + jjStopStringLiteralDfa_0(2, active0); + return 3; + } + switch(curChar) + { + case 99: + return jjMoveStringLiteralDfa4_0(active0, 0x100000L); + case 101: + if ((active0 & 0x200000L) != 0L) + return jjStartNfaWithStates_0(3, 21, 26); + break; + default : + break; + } + return jjStartNfa_0(2, active0); +} +private int jjMoveStringLiteralDfa4_0(long old0, long active0){ + if (((active0 &= old0)) == 0L) + return jjStartNfa_0(2, old0); + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { + jjStopStringLiteralDfa_0(3, active0); + return 4; + } + switch(curChar) + { + case 104: + return jjMoveStringLiteralDfa5_0(active0, 0x100000L); + default : + break; + } + return jjStartNfa_0(3, active0); +} +private int jjMoveStringLiteralDfa5_0(long old0, long active0){ + if (((active0 &= old0)) == 0L) + return jjStartNfa_0(3, old0); + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { + jjStopStringLiteralDfa_0(4, active0); + return 5; + } + switch(curChar) + { + case 101: + return jjMoveStringLiteralDfa6_0(active0, 0x100000L); + default : + break; + } + return jjStartNfa_0(4, active0); +} +private int jjMoveStringLiteralDfa6_0(long old0, long active0){ + if (((active0 &= old0)) == 0L) + return jjStartNfa_0(4, old0); + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { + jjStopStringLiteralDfa_0(5, active0); + return 6; + } + switch(curChar) + { + case 115: + if ((active0 & 0x100000L) != 0L) + return jjStartNfaWithStates_0(6, 20, 26); + break; + default : + break; + } + return jjStartNfa_0(5, active0); +} +private int jjStartNfaWithStates_0(int pos, int kind, int state) +{ + jjmatchedKind = kind; + jjmatchedPos = pos; + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { return pos + 1; } + return jjMoveNfa_0(state, pos + 1); +} +private int jjMoveNfa_0(int startState, int curPos) +{ + int startsAt = 0; + jjnewStateCnt = 37; + int i = 1; + jjstateSet[0] = startState; + int kind = 0x7fffffff; + for (;;) + { + if (++jjround == 0x7fffffff) + ReInitRounds(); + if (curChar < 64) + { + long l = 1L << curChar; + do + { + switch(jjstateSet[--i]) + { + case 1: + if ((0x3ff000000000000L & l) != 0L) + { + if (kind > 11) + kind = 11; + { jjCheckNAddStates(0, 6); } + } + else if (curChar == 38) + jjstateSet[jjnewStateCnt++] = 15; + else if (curChar == 46) + { jjCheckNAdd(8); } + else if (curChar == 60) + jjstateSet[jjnewStateCnt++] = 5; + else if (curChar == 33) + jjstateSet[jjnewStateCnt++] = 3; + else if (curChar == 61) + { + if (kind > 3) + kind = 3; + } + if (curChar == 61) + jjstateSet[jjnewStateCnt++] = 0; + break; + case 0: + if (curChar == 61 && kind > 3) + kind = 3; + break; + case 2: + if (curChar == 61 && kind > 3) + kind = 3; + break; + case 3: + if (curChar == 61 && kind > 4) + kind = 4; + break; + case 4: + if (curChar == 33) + jjstateSet[jjnewStateCnt++] = 3; + break; + case 5: + if (curChar == 62) + kind = 4; + break; + case 6: + if (curChar == 60) + jjstateSet[jjnewStateCnt++] = 5; + break; + case 7: + if (curChar == 46) + { jjCheckNAdd(8); } + break; + case 8: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 13) + kind = 13; + { jjCheckNAddTwoStates(8, 9); } + break; + case 10: + if ((0x280000000000L & l) != 0L) + { jjCheckNAdd(11); } + break; + case 11: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 13) + kind = 13; + { jjCheckNAdd(11); } + break; + case 15: + if (curChar == 38 && kind > 17) + kind = 17; + break; + case 16: + if (curChar == 38) + 
jjstateSet[jjnewStateCnt++] = 15; + break; + case 22: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 22) + kind = 22; + jjstateSet[jjnewStateCnt++] = 22; + break; + case 24: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 22) + kind = 22; + jjstateSet[jjnewStateCnt++] = 24; + break; + case 26: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 22) + kind = 22; + jjstateSet[jjnewStateCnt++] = 26; + break; + case 27: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 11) + kind = 11; + { jjCheckNAddStates(0, 6); } + break; + case 28: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 11) + kind = 11; + { jjCheckNAdd(28); } + break; + case 29: + if ((0x3ff000000000000L & l) != 0L) + { jjCheckNAddTwoStates(29, 30); } + break; + case 31: + if ((0x280000000000L & l) != 0L) + { jjCheckNAdd(32); } + break; + case 32: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 13) + kind = 13; + { jjCheckNAdd(32); } + break; + case 33: + if ((0x3ff000000000000L & l) != 0L) + { jjCheckNAddTwoStates(33, 34); } + break; + case 34: + if (curChar != 46) + break; + if (kind > 13) + kind = 13; + { jjCheckNAddTwoStates(35, 9); } + break; + case 35: + if ((0x3ff000000000000L & l) == 0L) + break; + if (kind > 13) + kind = 13; + { jjCheckNAddTwoStates(35, 9); } + break; + case 36: + if ((0x3ff000000000000L & l) != 0L) + { jjCheckNAddTwoStates(36, 7); } + break; + default : break; + } + } while(i != startsAt); + } + else if (curChar < 128) + { + long l = 1L << (curChar & 077); + do + { + switch(jjstateSet[--i]) + { + case 1: + if ((0x7fffffe00000000L & l) != 0L) + { jjCheckNAdd(26); } + else if ((0x7fffffeL & l) != 0L) + { jjCheckNAdd(24); } + else if (curChar == 95) + { jjCheckNAdd(22); } + else if (curChar == 124) + jjstateSet[jjnewStateCnt++] = 19; + if (curChar == 111) + jjstateSet[jjnewStateCnt++] = 17; + else if (curChar == 97) + jjstateSet[jjnewStateCnt++] = 13; + break; + case 9: + if ((0x2000000020L & l) != 0L) + { jjAddStates(7, 8); } + break; + case 12: + if (curChar == 100 && kind > 17) + kind = 17; + break; + case 13: + if (curChar == 110) + jjstateSet[jjnewStateCnt++] = 12; + break; + case 14: + if (curChar == 97) + jjstateSet[jjnewStateCnt++] = 13; + break; + case 17: + if (curChar == 114 && kind > 18) + kind = 18; + break; + case 18: + if (curChar == 111) + jjstateSet[jjnewStateCnt++] = 17; + break; + case 19: + if (curChar == 124 && kind > 18) + kind = 18; + break; + case 20: + if (curChar == 124) + jjstateSet[jjnewStateCnt++] = 19; + break; + case 21: + if (curChar == 95) + { jjCheckNAdd(22); } + break; + case 22: + if ((0x7fffffe87fffffeL & l) == 0L) + break; + if (kind > 22) + kind = 22; + { jjCheckNAdd(22); } + break; + case 23: + if ((0x7fffffeL & l) != 0L) + { jjCheckNAdd(24); } + break; + case 24: + if ((0x7fffffe87fffffeL & l) == 0L) + break; + if (kind > 22) + kind = 22; + { jjCheckNAdd(24); } + break; + case 25: + if ((0x7fffffe00000000L & l) != 0L) + { jjCheckNAdd(26); } + break; + case 26: + if ((0x7fffffe87fffffeL & l) == 0L) + break; + if (kind > 22) + kind = 22; + { jjCheckNAdd(26); } + break; + case 30: + if ((0x2000000020L & l) != 0L) + { jjAddStates(9, 10); } + break; + default : break; + } + } while(i != startsAt); + } + else + { + int i2 = (curChar & 0xff) >> 6; + long l2 = 1L << (curChar & 077); + do + { + switch(jjstateSet[--i]) + { + default : break; + } + } while(i != startsAt); + } + if (kind != 0x7fffffff) + { + jjmatchedKind = kind; + jjmatchedPos = curPos; + kind = 0x7fffffff; + } + ++curPos; + if ((i = 
jjnewStateCnt) == (startsAt = 37 - (jjnewStateCnt = startsAt))) + return curPos; + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { return curPos; } + } +} +private int jjMoveStringLiteralDfa0_2() +{ + return jjMoveNfa_2(0, 0); +} +static final long[] jjbitVec0 = { + 0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL +}; +private int jjMoveNfa_2(int startState, int curPos) +{ + int startsAt = 0; + jjnewStateCnt = 2; + int i = 1; + jjstateSet[0] = startState; + int kind = 0x7fffffff; + for (;;) + { + if (++jjround == 0x7fffffff) + ReInitRounds(); + if (curChar < 64) + { + long l = 1L << curChar; + do + { + switch(jjstateSet[--i]) + { + case 0: + if ((0xffffff7fffffffffL & l) != 0L) + { + if (kind > 29) + kind = 29; + } + else if (curChar == 39) + { + if (kind > 28) + kind = 28; + } + break; + case 1: + if ((0xffffff7fffffffffL & l) != 0L) + kind = 29; + break; + default : break; + } + } while(i != startsAt); + } + else if (curChar < 128) + { + long l = 1L << (curChar & 077); + do + { + switch(jjstateSet[--i]) + { + case 0: + kind = 29; + break; + default : break; + } + } while(i != startsAt); + } + else + { + int i2 = (curChar & 0xff) >> 6; + long l2 = 1L << (curChar & 077); + do + { + switch(jjstateSet[--i]) + { + case 0: + if ((jjbitVec0[i2] & l2) != 0L && kind > 29) + kind = 29; + break; + default : break; + } + } while(i != startsAt); + } + if (kind != 0x7fffffff) + { + jjmatchedKind = kind; + jjmatchedPos = curPos; + kind = 0x7fffffff; + } + ++curPos; + if ((i = jjnewStateCnt) == (startsAt = 2 - (jjnewStateCnt = startsAt))) + return curPos; + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { return curPos; } + } +} +private int jjMoveStringLiteralDfa0_1() +{ + return jjMoveNfa_1(0, 0); +} +private int jjMoveNfa_1(int startState, int curPos) +{ + int startsAt = 0; + jjnewStateCnt = 2; + int i = 1; + jjstateSet[0] = startState; + int kind = 0x7fffffff; + for (;;) + { + if (++jjround == 0x7fffffff) + ReInitRounds(); + if (curChar < 64) + { + long l = 1L << curChar; + do + { + switch(jjstateSet[--i]) + { + case 0: + if ((0xfffffffbffffffffL & l) != 0L) + { + if (kind > 27) + kind = 27; + } + else if (curChar == 34) + { + if (kind > 26) + kind = 26; + } + break; + case 1: + if ((0xfffffffbffffffffL & l) != 0L) + kind = 27; + break; + default : break; + } + } while(i != startsAt); + } + else if (curChar < 128) + { + long l = 1L << (curChar & 077); + do + { + switch(jjstateSet[--i]) + { + case 0: + kind = 27; + break; + default : break; + } + } while(i != startsAt); + } + else + { + int i2 = (curChar & 0xff) >> 6; + long l2 = 1L << (curChar & 077); + do + { + switch(jjstateSet[--i]) + { + case 0: + if ((jjbitVec0[i2] & l2) != 0L && kind > 27) + kind = 27; + break; + default : break; + } + } while(i != startsAt); + } + if (kind != 0x7fffffff) + { + jjmatchedKind = kind; + jjmatchedPos = curPos; + kind = 0x7fffffff; + } + ++curPos; + if ((i = jjnewStateCnt) == (startsAt = 2 - (jjnewStateCnt = startsAt))) + return curPos; + try { curChar = input_stream.readChar(); } + catch(java.io.IOException e) { return curPos; } + } +} +static final int[] jjnextStates = { + 28, 29, 30, 33, 34, 36, 7, 10, 11, 31, 32, +}; + +/** Token literal values. 
*/ +public static final String[] jjstrLiteralImages = { +"", null, null, null, null, "\76\75", "\76", "\74\75", "\74", "\50", "\51", +null, null, null, null, null, null, null, null, "\156\157\164", +"\155\141\164\143\150\145\163", "\154\151\153\145", null, null, "\42", "\47", null, null, null, null, }; +protected Token jjFillToken() +{ + final Token t; + final String curTokenImage; + final int beginLine; + final int endLine; + final int beginColumn; + final int endColumn; + String im = jjstrLiteralImages[jjmatchedKind]; + curTokenImage = (im == null) ? input_stream.GetImage() : im; + beginLine = input_stream.getBeginLine(); + beginColumn = input_stream.getBeginColumn(); + endLine = input_stream.getEndLine(); + endColumn = input_stream.getEndColumn(); + t = Token.newToken(jjmatchedKind, curTokenImage); + + t.beginLine = beginLine; + t.endLine = endLine; + t.beginColumn = beginColumn; + t.endColumn = endColumn; + + return t; +} + +int curLexState = 0; +int defaultLexState = 0; +int jjnewStateCnt; +int jjround; +int jjmatchedPos; +int jjmatchedKind; + +/** Get the next Token. */ +public Token getNextToken() +{ + Token matchedToken; + int curPos = 0; + + EOFLoop : + for (;;) + { + try + { + curChar = input_stream.BeginToken(); + } + catch(Exception e) + { + jjmatchedKind = 0; + jjmatchedPos = -1; + matchedToken = jjFillToken(); + return matchedToken; + } + + switch(curLexState) + { + case 0: + try { input_stream.backup(0); + while (curChar <= 32 && (0x100000200L & (1L << curChar)) != 0L) + curChar = input_stream.BeginToken(); + } + catch (java.io.IOException e1) { continue EOFLoop; } + jjmatchedKind = 0x7fffffff; + jjmatchedPos = 0; + curPos = jjMoveStringLiteralDfa0_0(); + break; + case 1: + jjmatchedKind = 0x7fffffff; + jjmatchedPos = 0; + curPos = jjMoveStringLiteralDfa0_1(); + break; + case 2: + jjmatchedKind = 0x7fffffff; + jjmatchedPos = 0; + curPos = jjMoveStringLiteralDfa0_2(); + break; + } + if (jjmatchedKind != 0x7fffffff) + { + if (jjmatchedPos + 1 < curPos) + input_stream.backup(curPos - jjmatchedPos - 1); + if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L) + { + matchedToken = jjFillToken(); + if (jjnewLexState[jjmatchedKind] != -1) + curLexState = jjnewLexState[jjmatchedKind]; + return matchedToken; + } + else + { + if (jjnewLexState[jjmatchedKind] != -1) + curLexState = jjnewLexState[jjmatchedKind]; + continue EOFLoop; + } + } + int error_line = input_stream.getEndLine(); + int error_column = input_stream.getEndColumn(); + String error_after = null; + boolean EOFSeen = false; + try { input_stream.readChar(); input_stream.backup(1); } + catch (java.io.IOException e1) { + EOFSeen = true; + error_after = curPos <= 1 ? "" : input_stream.GetImage(); + if (curChar == '\n' || curChar == '\r') { + error_line++; + error_column = 0; + } + else + error_column++; + } + if (!EOFSeen) { + input_stream.backup(1); + error_after = curPos <= 1 ? 
"" : input_stream.GetImage(); + } + throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR); + } +} + +private void jjCheckNAdd(int state) +{ + if (jjrounds[state] != jjround) + { + jjstateSet[jjnewStateCnt++] = state; + jjrounds[state] = jjround; + } +} +private void jjAddStates(int start, int end) +{ + do { + jjstateSet[jjnewStateCnt++] = jjnextStates[start]; + } while (start++ != end); +} +private void jjCheckNAddTwoStates(int state1, int state2) +{ + jjCheckNAdd(state1); + jjCheckNAdd(state2); +} + +private void jjCheckNAddStates(int start, int end) +{ + do { + jjCheckNAdd(jjnextStates[start]); + } while (start++ != end); +} + + /** Constructor. */ + public PartitionParserTokenManager(SimpleCharStream stream){ + + if (SimpleCharStream.staticFlag) + throw new Error("ERROR: Cannot use a static CharStream class with a non-static lexical analyzer."); + + input_stream = stream; + } + + /** Constructor. */ + public PartitionParserTokenManager (SimpleCharStream stream, int lexState){ + ReInit(stream); + SwitchTo(lexState); + } + + /** Reinitialise parser. */ + public void ReInit(SimpleCharStream stream) + { + + jjmatchedPos = jjnewStateCnt = 0; + curLexState = defaultLexState; + input_stream = stream; + ReInitRounds(); + } + + private void ReInitRounds() + { + int i; + jjround = 0x80000001; + for (i = 37; i-- > 0;) + jjrounds[i] = 0x80000000; + } + + /** Reinitialise parser. */ + public void ReInit( SimpleCharStream stream, int lexState) + { + + ReInit( stream); + SwitchTo(lexState); + } + + /** Switch to specified lex state. */ + public void SwitchTo(int lexState) + { + if (lexState >= 3 || lexState < 0) + throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE); + else + curLexState = lexState; + } + +/** Lexer state names. */ +public static final String[] lexStateNames = { + "DEFAULT", + "STRING_STATE", + "SSTRING_STATE", +}; + +/** Lex State array. */ +public static final int[] jjnewLexState = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, + 2, 0, -1, 0, -1, +}; +static final long[] jjtoToken = { + 0x3f7e2ff9L, +}; +static final long[] jjtoSkip = { + 0x6L, +}; + protected SimpleCharStream input_stream; + + private final int[] jjrounds = new int[37]; + private final int[] jjstateSet = new int[2 * 37]; + + + protected int curChar; +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserTreeConstants.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserTreeConstants.java new file mode 100644 index 000000000..d3de2ef7d --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserTreeConstants.java @@ -0,0 +1,45 @@ +/* Generated By:JavaCC: Do not edit this line. 
PartitionParserTreeConstants.java Version 6.1_2 */ +package com.netflix.metacat.common.partition.parser; + +public interface PartitionParserTreeConstants +{ + public int JJTFILTER = 0; + public int JJTVOID = 1; + public int JJTAND = 2; + public int JJTOR = 3; + public int JJTNEVAL = 4; + public int JJTEVAL = 5; + public int JJTGT = 6; + public int JJTLT = 7; + public int JJTLTE = 8; + public int JJTGTE = 9; + public int JJTEQ = 10; + public int JJTNEQ = 11; + public int JJTMATCHES = 12; + public int JJTLIKE = 13; + public int JJTNUM = 14; + public int JJTSTRING = 15; + public int JJTVAR = 16; + + + public String[] jjtNodeName = { + "FILTER", + "void", + "AND", + "OR", + "NEVAL", + "EVAL", + "GT", + "LT", + "LTE", + "GTE", + "EQ", + "NEQ", + "MATCHES", + "LIKE", + "NUM", + "STRING", + "VAR", + }; +} +/* JavaCC - OriginalChecksum=39fa5c3d77b7e45b2de2245168f22ad8 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserVisitor.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserVisitor.java new file mode 100644 index 000000000..ec9f7561c --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/PartitionParserVisitor.java @@ -0,0 +1,24 @@ +/* Generated By:JavaCC: Do not edit this line. PartitionParserVisitor.java Version 6.1_2 */ +package com.netflix.metacat.common.partition.parser; + +public interface PartitionParserVisitor +{ + public Object visit(SimpleNode node, Object data); + public Object visit(ASTFILTER node, Object data); + public Object visit(ASTAND node, Object data); + public Object visit(ASTOR node, Object data); + public Object visit(ASTNEVAL node, Object data); + public Object visit(ASTEVAL node, Object data); + public Object visit(ASTGT node, Object data); + public Object visit(ASTLT node, Object data); + public Object visit(ASTLTE node, Object data); + public Object visit(ASTGTE node, Object data); + public Object visit(ASTEQ node, Object data); + public Object visit(ASTNEQ node, Object data); + public Object visit(ASTMATCHES node, Object data); + public Object visit(ASTLIKE node, Object data); + public Object visit(ASTNUM node, Object data); + public Object visit(ASTSTRING node, Object data); + public Object visit(ASTVAR node, Object data); +} +/* JavaCC - OriginalChecksum=05c7915a864bf7b60e61bf6f66b57af6 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/SimpleCharStream.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/SimpleCharStream.java new file mode 100644 index 000000000..5bb4b2dd7 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/SimpleCharStream.java @@ -0,0 +1,474 @@ +/* Generated By:JavaCC: Do not edit this line. SimpleCharStream.java Version 6.1 */ +/* JavaCCOptions:STATIC=false,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +/** + * An implementation of interface CharStream, where the stream is assumed to + * contain only ASCII characters (without unicode processing). + */ + +public class SimpleCharStream +{ +/** Whether parser is static. */ + public static final boolean staticFlag = false; + int bufsize; + int available; + int tokenBegin; +/** Position in buffer. 
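+ * (Index into buffer of the character most recently returned by readChar();
+ * -1 until the first character is read.)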
*/ + public int bufpos = -1; + protected int bufline[]; + protected int bufcolumn[]; + + protected int column = 0; + protected int line = 1; + + protected boolean prevCharIsCR = false; + protected boolean prevCharIsLF = false; + + protected java.io.Reader inputStream; + + protected char[] buffer; + protected int maxNextCharInd = 0; + protected int inBuf = 0; + protected int tabSize = 1; + protected boolean trackLineColumn = true; + + public void setTabSize(int i) { tabSize = i; } + public int getTabSize() { return tabSize; } + + + + protected void ExpandBuff(boolean wrapAround) + { + char[] newbuffer = new char[bufsize + 2048]; + int newbufline[] = new int[bufsize + 2048]; + int newbufcolumn[] = new int[bufsize + 2048]; + + try + { + if (wrapAround) + { + System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); + System.arraycopy(buffer, 0, newbuffer, bufsize - tokenBegin, bufpos); + buffer = newbuffer; + + System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); + System.arraycopy(bufline, 0, newbufline, bufsize - tokenBegin, bufpos); + bufline = newbufline; + + System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); + System.arraycopy(bufcolumn, 0, newbufcolumn, bufsize - tokenBegin, bufpos); + bufcolumn = newbufcolumn; + + maxNextCharInd = (bufpos += (bufsize - tokenBegin)); + } + else + { + System.arraycopy(buffer, tokenBegin, newbuffer, 0, bufsize - tokenBegin); + buffer = newbuffer; + + System.arraycopy(bufline, tokenBegin, newbufline, 0, bufsize - tokenBegin); + bufline = newbufline; + + System.arraycopy(bufcolumn, tokenBegin, newbufcolumn, 0, bufsize - tokenBegin); + bufcolumn = newbufcolumn; + + maxNextCharInd = (bufpos -= tokenBegin); + } + } + catch (Throwable t) + { + throw new Error(t.getMessage()); + } + + + bufsize += 2048; + available = bufsize; + tokenBegin = 0; + } + + protected void FillBuff() throws java.io.IOException + { + if (maxNextCharInd == available) + { + if (available == bufsize) + { + if (tokenBegin > 2048) + { + bufpos = maxNextCharInd = 0; + available = tokenBegin; + } + else if (tokenBegin < 0) + bufpos = maxNextCharInd = 0; + else + ExpandBuff(false); + } + else if (available > tokenBegin) + available = bufsize; + else if ((tokenBegin - available) < 2048) + ExpandBuff(true); + else + available = tokenBegin; + } + + int i; + try { + if ((i = inputStream.read(buffer, maxNextCharInd, available - maxNextCharInd)) == -1) + { + inputStream.close(); + throw new java.io.IOException(); + } + else + maxNextCharInd += i; + return; + } + catch(java.io.IOException e) { + --bufpos; + backup(0); + if (tokenBegin == -1) + tokenBegin = bufpos; + throw e; + } + } + +/** Start. */ + public char BeginToken() throws java.io.IOException + { + tokenBegin = -1; + char c = readChar(); + tokenBegin = bufpos; + + return c; + } + + protected void UpdateLineColumn(char c) + { + column++; + + if (prevCharIsLF) + { + prevCharIsLF = false; + line += (column = 1); + } + else if (prevCharIsCR) + { + prevCharIsCR = false; + if (c == '\n') + { + prevCharIsLF = true; + } + else + line += (column = 1); + } + + switch (c) + { + case '\r' : + prevCharIsCR = true; + break; + case '\n' : + prevCharIsLF = true; + break; + case '\t' : + column--; + column += (tabSize - (column % tabSize)); + break; + default : + break; + } + + bufline[bufpos] = line; + bufcolumn[bufpos] = column; + } + +/** Read a character. 
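+ * Characters pushed back with backup() are replayed from the ring buffer first
+ * (inBuf counts how many are pending); only freshly read characters go through
+ * UpdateLineColumn, so replayed characters do not disturb position tracking.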
*/ + public char readChar() throws java.io.IOException + { + if (inBuf > 0) + { + --inBuf; + + if (++bufpos == bufsize) + bufpos = 0; + + return buffer[bufpos]; + } + + if (++bufpos >= maxNextCharInd) + FillBuff(); + + char c = buffer[bufpos]; + + UpdateLineColumn(c); + return c; + } + + @Deprecated + /** + * @deprecated + * @see #getEndColumn + */ + + public int getColumn() { + return bufcolumn[bufpos]; + } + + @Deprecated + /** + * @deprecated + * @see #getEndLine + */ + + public int getLine() { + return bufline[bufpos]; + } + + /** Get token end column number. */ + public int getEndColumn() { + return bufcolumn[bufpos]; + } + + /** Get token end line number. */ + public int getEndLine() { + return bufline[bufpos]; + } + + /** Get token beginning column number. */ + public int getBeginColumn() { + return bufcolumn[tokenBegin]; + } + + /** Get token beginning line number. */ + public int getBeginLine() { + return bufline[tokenBegin]; + } + +/** Backup a number of characters. */ + public void backup(int amount) { + + inBuf += amount; + if ((bufpos -= amount) < 0) + bufpos += bufsize; + } + + /** Constructor. */ + public SimpleCharStream(java.io.Reader dstream, int startline, + int startcolumn, int buffersize) + { + inputStream = dstream; + line = startline; + column = startcolumn - 1; + + available = bufsize = buffersize; + buffer = new char[buffersize]; + bufline = new int[buffersize]; + bufcolumn = new int[buffersize]; + } + + /** Constructor. */ + public SimpleCharStream(java.io.Reader dstream, int startline, + int startcolumn) + { + this(dstream, startline, startcolumn, 4096); + } + + /** Constructor. */ + public SimpleCharStream(java.io.Reader dstream) + { + this(dstream, 1, 1, 4096); + } + + /** Reinitialise. */ + public void ReInit(java.io.Reader dstream, int startline, + int startcolumn, int buffersize) + { + inputStream = dstream; + line = startline; + column = startcolumn - 1; + + if (buffer == null || buffersize != buffer.length) + { + available = bufsize = buffersize; + buffer = new char[buffersize]; + bufline = new int[buffersize]; + bufcolumn = new int[buffersize]; + } + prevCharIsLF = prevCharIsCR = false; + tokenBegin = inBuf = maxNextCharInd = 0; + bufpos = -1; + } + + /** Reinitialise. */ + public void ReInit(java.io.Reader dstream, int startline, + int startcolumn) + { + ReInit(dstream, startline, startcolumn, 4096); + } + + /** Reinitialise. */ + public void ReInit(java.io.Reader dstream) + { + ReInit(dstream, 1, 1, 4096); + } + /** Constructor. */ + public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, + int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException + { + this(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); + } + + /** Constructor. */ + public SimpleCharStream(java.io.InputStream dstream, int startline, + int startcolumn, int buffersize) + { + this(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); + } + + /** Constructor. */ + public SimpleCharStream(java.io.InputStream dstream, String encoding, int startline, + int startcolumn) throws java.io.UnsupportedEncodingException + { + this(dstream, encoding, startline, startcolumn, 4096); + } + + /** Constructor. */ + public SimpleCharStream(java.io.InputStream dstream, int startline, + int startcolumn) + { + this(dstream, startline, startcolumn, 4096); + } + + /** Constructor. 
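+ * Wraps the stream in an InputStreamReader using the given encoding, or the
+ * platform default charset when encoding is null, with a 4096-character buffer.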
*/ + public SimpleCharStream(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException + { + this(dstream, encoding, 1, 1, 4096); + } + + /** Constructor. */ + public SimpleCharStream(java.io.InputStream dstream) + { + this(dstream, 1, 1, 4096); + } + + /** Reinitialise. */ + public void ReInit(java.io.InputStream dstream, String encoding, int startline, + int startcolumn, int buffersize) throws java.io.UnsupportedEncodingException + { + ReInit(encoding == null ? new java.io.InputStreamReader(dstream) : new java.io.InputStreamReader(dstream, encoding), startline, startcolumn, buffersize); + } + + /** Reinitialise. */ + public void ReInit(java.io.InputStream dstream, int startline, + int startcolumn, int buffersize) + { + ReInit(new java.io.InputStreamReader(dstream), startline, startcolumn, buffersize); + } + + /** Reinitialise. */ + public void ReInit(java.io.InputStream dstream, String encoding) throws java.io.UnsupportedEncodingException + { + ReInit(dstream, encoding, 1, 1, 4096); + } + + /** Reinitialise. */ + public void ReInit(java.io.InputStream dstream) + { + ReInit(dstream, 1, 1, 4096); + } + /** Reinitialise. */ + public void ReInit(java.io.InputStream dstream, String encoding, int startline, + int startcolumn) throws java.io.UnsupportedEncodingException + { + ReInit(dstream, encoding, startline, startcolumn, 4096); + } + /** Reinitialise. */ + public void ReInit(java.io.InputStream dstream, int startline, + int startcolumn) + { + ReInit(dstream, startline, startcolumn, 4096); + } + /** Get token literal value. */ + public String GetImage() + { + if (bufpos >= tokenBegin) + return new String(buffer, tokenBegin, bufpos - tokenBegin + 1); + else + return new String(buffer, tokenBegin, bufsize - tokenBegin) + + new String(buffer, 0, bufpos + 1); + } + + /** Get the suffix. */ + public char[] GetSuffix(int len) + { + char[] ret = new char[len]; + + if ((bufpos + 1) >= len) + System.arraycopy(buffer, bufpos - len + 1, ret, 0, len); + else + { + System.arraycopy(buffer, bufsize - (len - bufpos - 1), ret, 0, + len - bufpos - 1); + System.arraycopy(buffer, 0, ret, len - bufpos - 1, bufpos + 1); + } + + return ret; + } + + /** Reset buffer when finished. */ + public void Done() + { + buffer = null; + bufline = null; + bufcolumn = null; + } + + /** + * Method to adjust line and column numbers for the start of a token. 
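+ * Presumably for lexical actions that re-interpret a token after the fact: it
+ * rewrites the line/column entries recorded for the current token so that it
+ * appears to begin at newLine/newCol.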
+ */ + public void adjustBeginLineColumn(int newLine, int newCol) + { + int start = tokenBegin; + int len; + + if (bufpos >= tokenBegin) + { + len = bufpos - tokenBegin + inBuf + 1; + } + else + { + len = bufsize - tokenBegin + bufpos + 1 + inBuf; + } + + int i = 0, j = 0, k = 0; + int nextColDiff = 0, columnDiff = 0; + + while (i < len && bufline[j = start % bufsize] == bufline[k = ++start % bufsize]) + { + bufline[j] = newLine; + nextColDiff = columnDiff + bufcolumn[k] - bufcolumn[j]; + bufcolumn[j] = newCol + columnDiff; + columnDiff = nextColDiff; + i++; + } + + if (i < len) + { + bufline[j] = newLine++; + bufcolumn[j] = newCol + columnDiff; + + while (i++ < len) + { + if (bufline[j = start % bufsize] != bufline[++start % bufsize]) + bufline[j] = newLine++; + else + bufline[j] = newLine; + } + } + + line = bufline[j]; + column = bufcolumn[j]; + } + boolean getTrackLineColumn() { return trackLineColumn; } + void setTrackLineColumn(boolean tlc) { trackLineColumn = tlc; } +} +/* JavaCC - OriginalChecksum=9c39ca7cdd22491638c65876bc9c5ba3 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/SimpleNode.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/SimpleNode.java new file mode 100644 index 000000000..3f0a6ecb5 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/SimpleNode.java @@ -0,0 +1,102 @@ +/* Generated By:JJTree: Do not edit this line. SimpleNode.java Version 6.1 */ +/* JavaCCOptions:MULTI=true,NODE_USES_PARSER=false,VISITOR=true,TRACK_TOKENS=false,NODE_PREFIX=AST,NODE_EXTENDS=,NODE_FACTORY=,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +public +class SimpleNode implements Node { + + protected Node parent; + protected Node[] children; + protected int id; + protected Object value; + protected PartitionParser parser; + + public SimpleNode(int i) { + id = i; + } + + public SimpleNode(PartitionParser p, int i) { + this(i); + parser = p; + } + + public void jjtOpen() { + } + + public void jjtClose() { + } + + public void jjtSetParent(Node n) { parent = n; } + public Node jjtGetParent() { return parent; } + + public void jjtAddChild(Node n, int i) { + if (children == null) { + children = new Node[i + 1]; + } else if (i >= children.length) { + Node c[] = new Node[i + 1]; + System.arraycopy(children, 0, c, 0, children.length); + children = c; + } + children[i] = n; + } + + public Node jjtGetChild(int i) { + return children[i]; + } + + public int jjtGetNumChildren() { + return (children == null) ? 0 : children.length; + } + + public void jjtSetValue(Object value) { this.value = value; } + public Object jjtGetValue() { return value; } + + /** Accept the visitor. **/ + public Object jjtAccept(PartitionParserVisitor visitor, Object data) +{ + return visitor.visit(this, data); + } + + /** Accept the visitor. **/ + public Object childrenAccept(PartitionParserVisitor visitor, Object data) +{ + if (children != null) { + for (int i = 0; i < children.length; ++i) { + children[i].jjtAccept(visitor, data); + } + } + return data; + } + + /* You can override these two methods in subclasses of SimpleNode to + customize the way the node appears when the tree is dumped. If + your output uses more than one line you should override + toString(String), otherwise overriding toString() is probably all + you need to do. 
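+
+ For example, a subclass could append its value (a sketch, not generated code):
+
+ public String toString() { return super.toString() + " value=" + jjtGetValue(); }
+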
*/ + + public String toString() { + return PartitionParserTreeConstants.jjtNodeName[id]; + } + public String toString(String prefix) { return prefix + toString(); } + + /* Override this method if you want to customize how the node dumps + out its children. */ + + public void dump(String prefix) { + System.out.println(toString(prefix)); + if (children != null) { + for (int i = 0; i < children.length; ++i) { + SimpleNode n = (SimpleNode)children[i]; + if (n != null) { + n.dump(prefix + " "); + } + } + } + } + + public int getId() { + return id; + } +} + +/* JavaCC - OriginalChecksum=d933f51a60f5df18ee957e3447d1a0d6 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Token.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Token.java new file mode 100644 index 000000000..fa40615f8 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Token.java @@ -0,0 +1,131 @@ +/* Generated By:JavaCC: Do not edit this line. Token.java Version 6.1 */ +/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COLUMN=true,SUPPORT_CLASS_VISIBILITY_PUBLIC=true */ +package com.netflix.metacat.common.partition.parser; + +/** + * Describes the input token stream. + */ + +public class Token implements java.io.Serializable { + + /** + * The version identifier for this Serializable class. + * Increment only if the serialized form of the + * class changes. + */ + private static final long serialVersionUID = 1L; + + /** + * An integer that describes the kind of this token. This numbering + * system is determined by JavaCCParser, and a table of these numbers is + * stored in the file ...Constants.java. + */ + public int kind; + + /** The line number of the first character of this Token. */ + public int beginLine; + /** The column number of the first character of this Token. */ + public int beginColumn; + /** The line number of the last character of this Token. */ + public int endLine; + /** The column number of the last character of this Token. */ + public int endColumn; + + /** + * The string image of the token. + */ + public String image; + + /** + * A reference to the next regular (non-special) token from the input + * stream. If this is the last token from the input stream, or if the + * token manager has not read tokens beyond this one, this field is + * set to null. This is true only if this token is also a regular + * token. Otherwise, see below for a description of the contents of + * this field. + */ + public Token next; + + /** + * This field is used to access special tokens that occur prior to this + * token, but after the immediately preceding regular (non-special) token. + * If there are no such special tokens, this field is set to null. + * When there are more than one such special token, this field refers + * to the last of these special tokens, which in turn refers to the next + * previous special token through its specialToken field, and so on + * until the first special token (whose specialToken field is null). + * The next fields of special tokens refer to other special tokens that + * immediately follow it (without an intervening regular token). If there + * is no such token, this field is null. + */ + public Token specialToken; + + /** + * An optional attribute value of the Token. + * Tokens which are not used as syntactic sugar will often contain + * meaningful values that will be used later on by the compiler or + * interpreter. 
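+ * For example, a numeric token's value could be the parsed number while its
+ * image keeps the raw text.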
This attribute value is often different from the image.
+   * Any subclass of Token that actually wants to return a non-null value can
+   * override this method as appropriate.
+   */
+  public Object getValue() {
+    return null;
+  }
+
+  /**
+   * No-argument constructor.
+   */
+  public Token() {}
+
+  /**
+   * Constructs a new token of the specified kind.
+   */
+  public Token(int kind)
+  {
+    this(kind, null);
+  }
+
+  /**
+   * Constructs a new token of the specified kind with the given image.
+   */
+  public Token(int kind, String image)
+  {
+    this.kind = kind;
+    this.image = image;
+  }
+
+  /**
+   * Returns the image.
+   */
+  public String toString()
+  {
+    return image;
+  }
+
+  /**
+   * Returns a new Token object, by default. However, if you want, you
+   * can create and return subclass objects based on the value of ofKind.
+   * Simply add the cases to the switch for all those special cases.
+   * For example, if you have a subclass of Token called IDToken that
+   * you want to create if ofKind is ID, simply add something like:
+   *
+   *    case MyParserConstants.ID : return new IDToken(ofKind, image);
+   *
+   * to the following switch statement. Then you can cast the matchedToken
+   * variable to the appropriate type and use it in your lexical actions.
+   */
+  public static Token newToken(int ofKind, String image)
+  {
+    switch(ofKind)
+    {
+      default : return new Token(ofKind, image);
+    }
+  }
+
+  public static Token newToken(int ofKind)
+  {
+    return newToken(ofKind, null);
+  }
+
+}
+/* JavaCC - OriginalChecksum=4d702a3a65566da534de18086a42aa30 (do not edit this line) */
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/TokenMgrError.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/TokenMgrError.java
new file mode 100644
index 000000000..e44a4a8ea
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/TokenMgrError.java
@@ -0,0 +1,146 @@
+/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 6.1 */
+/* JavaCCOptions: */
+package com.netflix.metacat.common.partition.parser;
+
+/** Token Manager Error. */
+public class TokenMgrError extends Error
+{
+
+  /**
+   * The version identifier for this Serializable class.
+   * Increment only if the serialized form of the
+   * class changes.
+   */
+  private static final long serialVersionUID = 1L;
+
+  /*
+   * Ordinals for various reasons why an Error of this type can be thrown.
+   */
+
+  /**
+   * Lexical error occurred.
+   */
+  public static final int LEXICAL_ERROR = 0;
+
+  /**
+   * An attempt was made to create a second instance of a static token manager.
+   */
+  public static final int STATIC_LEXER_ERROR = 1;
+
+  /**
+   * Tried to change to an invalid lexical state.
+   */
+  public static final int INVALID_LEXICAL_STATE = 2;
+
+  /**
+   * Detected (and bailed out of) an infinite loop in the token manager.
+   */
+  public static final int LOOP_DETECTED = 3;
+
+  /**
+   * Indicates the reason why the exception is thrown. It will have
+   * one of the above 4 values.
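+   * (Namely LEXICAL_ERROR, STATIC_LEXER_ERROR, INVALID_LEXICAL_STATE or LOOP_DETECTED.)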
+ */ + int errorCode; + + /** + * Replaces unprintable characters by their escaped (or unicode escaped) + * equivalents in the given string + */ + protected static final String addEscapes(String str) { + StringBuffer retval = new StringBuffer(); + char ch; + for (int i = 0; i < str.length(); i++) { + switch (str.charAt(i)) + { + case '\b': + retval.append("\\b"); + continue; + case '\t': + retval.append("\\t"); + continue; + case '\n': + retval.append("\\n"); + continue; + case '\f': + retval.append("\\f"); + continue; + case '\r': + retval.append("\\r"); + continue; + case '\"': + retval.append("\\\""); + continue; + case '\'': + retval.append("\\\'"); + continue; + case '\\': + retval.append("\\\\"); + continue; + default: + if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) { + String s = "0000" + Integer.toString(ch, 16); + retval.append("\\u" + s.substring(s.length() - 4, s.length())); + } else { + retval.append(ch); + } + continue; + } + } + return retval.toString(); + } + + /** + * Returns a detailed message for the Error when it is thrown by the + * token manager to indicate a lexical error. + * Parameters : + * EOFSeen : indicates if EOF caused the lexical error + * curLexState : lexical state in which this error occurred + * errorLine : line number when the error occurred + * errorColumn : column number when the error occurred + * errorAfter : prefix that was seen before this error occurred + * curchar : the offending character + * Note: You can customize the lexical error message by modifying this method. + */ + protected static String LexicalErr(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar) { + char curChar1 = (char)curChar; + return("Lexical error at line " + + errorLine + ", column " + + errorColumn + ". Encountered: " + + (EOFSeen ? " " : ("\"" + addEscapes(String.valueOf(curChar1)) + "\"") + " (" + (int)curChar + "), ") + + "after : \"" + addEscapes(errorAfter) + "\""); + } + + /** + * You can also modify the body of this method to customize your error messages. + * For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not + * of end-users concern, so you can return something like : + * + * "Internal Error : Please file a bug report .... " + * + * from this method for such cases in the release version of your parser. + */ + public String getMessage() { + return super.getMessage(); + } + + /* + * Constructors of various flavors follow. + */ + + /** No arg constructor. */ + public TokenMgrError() { + } + + /** Constructor with message and reason. */ + public TokenMgrError(String message, int reason) { + super(message); + errorCode = reason; + } + + /** Full Constructor. 
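+ * Builds the detail message via LexicalErr(EOFSeen, lexState, errorLine,
+ * errorColumn, errorAfter, curChar) and records the reason code.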
*/ + public TokenMgrError(boolean EOFSeen, int lexState, int errorLine, int errorColumn, String errorAfter, int curChar, int reason) { + this(LexicalErr(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason); + } +} +/* JavaCC - OriginalChecksum=3812be28fe4e0d40fc76a8de299261b3 (do not edit this line) */ diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Variable.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Variable.java new file mode 100644 index 000000000..8439c7015 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/parser/Variable.java @@ -0,0 +1,17 @@ +package com.netflix.metacat.common.partition.parser; + +public class Variable { + private String name; + + public Variable(String name) { + this.setName(name); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/util/FilterPartition.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/util/FilterPartition.java new file mode 100644 index 000000000..776329a1e --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/util/FilterPartition.java @@ -0,0 +1,51 @@ +package com.netflix.metacat.common.partition.util; + +import com.google.common.collect.Maps; +import com.netflix.metacat.common.partition.parser.PartitionParser; +import com.netflix.metacat.common.partition.visitor.PartitionParserEval; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + +public class FilterPartition { + private static final Logger log = LoggerFactory.getLogger(FilterPartition.class); + + PartitionParser parser; + Map context = Maps.newLinkedHashMap(); + + public boolean evaluatePartitionExpression(String partitionExpression, String name, String path) throws + IOException { + return evaluatePartitionExpression(partitionExpression, name, path, false, null); + } + public boolean evaluatePartitionExpression(String partitionExpression, String name, String path, boolean batchid, Map values) { + if (partitionExpression != null) { + try { + if (parser == null) { + parser = new PartitionParser(new StringReader(partitionExpression)); + } else { + parser.ReInit(new StringReader(partitionExpression)); + } + context.clear(); + if (batchid) { + PartitionUtil.getPartitionKeyValues(path, context); + } + PartitionUtil.getPartitionKeyValues(name, context); + if( values != null){ + context.putAll(values); + } + if(context.size() > 0) { + return (Boolean) parser.filter().jjtAccept(new PartitionParserEval(context), null); + } else { + return false; + } + } catch(Exception e) { + log.warn("Caught unexpected exception during evaluatePartitionExpression," + e); + return false; + } + } + return true; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/util/PartitionUtil.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/util/PartitionUtil.java new file mode 100644 index 000000000..845c4d02e --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/util/PartitionUtil.java @@ -0,0 +1,55 @@ +package com.netflix.metacat.common.partition.util; + +import com.google.common.base.Splitter; +import com.google.common.collect.Maps; +import org.slf4j.Logger; 
+import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Map; + +/** + * Created by amajumdar on 5/7/15. + */ +public class PartitionUtil { + private static final Splitter EQUAL_SPLITTER = Splitter.on('=').trimResults(); + private static final Splitter SLASH_SPLITTER = Splitter.on('/').omitEmptyStrings().trimResults(); + private static final Logger log = LoggerFactory.getLogger(PartitionUtil.class); + public static Map getPartitionKeyValues(String location) { + Map parts = Maps.newLinkedHashMap(); + getPartitionKeyValues(location, parts); + return parts; + } + + public static void getPartitionKeyValues(String location, Map parts) { + for (String part : Splitter.on('/').omitEmptyStrings().split(location)) { + if (part.contains("=")) { + String[] values = part.split("=", 2); + + if(values[0].equalsIgnoreCase("null") || values[1].equalsIgnoreCase("null")) { + log.debug("Found 'null' string in kvp [{}] skipping.", part); + } else { + parts.put(values[0], values[1]); + } + } + } + } + + public static void validatePartitionName(String partitionName, List partitionKeys) { + if (partitionKeys == null || partitionKeys.isEmpty()) { + throw new IllegalStateException("No partitionKeys are defined"); + } + + for (String part : SLASH_SPLITTER.split(partitionName)) { + List tokens = EQUAL_SPLITTER.splitToList(part); + if (tokens.size() != 2) { + throw new IllegalArgumentException(String.format("Partition name '%s' is invalid", partitionName)); + } + String key = tokens.get(0); + String value = tokens.get(1); + if (!partitionKeys.contains(key) || value.isEmpty() || "null".equalsIgnoreCase(value)) { + throw new IllegalArgumentException(String.format("Partition name '%s' is invalid", partitionName)); + } + } + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionKeyParserEval.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionKeyParserEval.java new file mode 100644 index 000000000..697871dd7 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionKeyParserEval.java @@ -0,0 +1,79 @@ +package com.netflix.metacat.common.partition.visitor; + +import com.google.common.collect.Sets; +import com.netflix.metacat.common.partition.parser.ASTAND; +import com.netflix.metacat.common.partition.parser.ASTEQ; +import com.netflix.metacat.common.partition.parser.ASTEVAL; +import com.netflix.metacat.common.partition.parser.ASTFILTER; +import com.netflix.metacat.common.partition.parser.ASTGT; +import com.netflix.metacat.common.partition.parser.ASTGTE; +import com.netflix.metacat.common.partition.parser.ASTLIKE; +import com.netflix.metacat.common.partition.parser.ASTLT; +import com.netflix.metacat.common.partition.parser.ASTLTE; +import com.netflix.metacat.common.partition.parser.ASTMATCHES; +import com.netflix.metacat.common.partition.parser.ASTNEQ; +import com.netflix.metacat.common.partition.parser.ASTNEVAL; +import com.netflix.metacat.common.partition.parser.ASTNUM; +import com.netflix.metacat.common.partition.parser.ASTOR; +import com.netflix.metacat.common.partition.parser.ASTSTRING; +import com.netflix.metacat.common.partition.parser.ASTVAR; +import com.netflix.metacat.common.partition.parser.PartitionParserVisitor; +import com.netflix.metacat.common.partition.parser.SimpleNode; +import com.netflix.metacat.common.partition.parser.Variable; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; + +public class 
PartitionKeyParserEval extends PartitionParserEval { + + public String evalString(SimpleNode node, Object data) { + Object value1 = node.jjtGetChild(0).jjtAccept(this, data); + Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data); + Object value2 = node.jjtGetChild(2).jjtAccept(this, data); + if (comparison != Compare.EQ) { + return null; + } + return String.format("%s=%s", value1, value2.toString()); + } + + @SuppressWarnings("unchecked") + @Override + public Object visit(ASTAND node, Object data) { + Collection v1 = (Collection) node.jjtGetChild(0).jjtAccept(this, data); + Object b = node.jjtGetChild(1).jjtAccept(this, data); + v1.addAll((Collection) b); + return v1; + } + + @Override + public Object visit(ASTEQ node, Object data) { + return Compare.EQ; + } + + @Override + public Object visit(ASTEVAL node, Object data) { + Set result = Sets.newHashSet(); + String value = evalString(node, data); + if( value != null){ + result = Sets.newHashSet(value); + } + return result; + } + + @Override + public Object visit(ASTNEVAL node, Object data) { + return new HashSet(); + } + + @Override + public Object visit(ASTOR node, Object data) { + return new HashSet(); + } + + @Override + public Object visit(ASTVAR node, Object data) { + return ((Variable)node.jjtGetValue()).getName(); + } + +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionParamParserEval.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionParamParserEval.java new file mode 100644 index 000000000..fd912003d --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionParamParserEval.java @@ -0,0 +1,16 @@ +package com.netflix.metacat.common.partition.visitor; + +import com.netflix.metacat.common.partition.parser.SimpleNode; + +public class PartitionParamParserEval extends PartitionKeyParserEval { + + public String evalString(SimpleNode node, Object data) { + Object value1 = node.jjtGetChild(0).jjtAccept(this, data); + if (!"dateCreated".equals(value1)) { + return null; + } + Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data); + Object value2 = node.jjtGetChild(2).jjtAccept(this, data); + return String.format("%s%s%s", value1, comparison.getExpression(),value2.toString()); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionParserEval.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionParserEval.java new file mode 100644 index 000000000..1e8b1e4c0 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/partition/visitor/PartitionParserEval.java @@ -0,0 +1,218 @@ +package com.netflix.metacat.common.partition.visitor; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import com.netflix.metacat.common.partition.parser.ASTAND; +import com.netflix.metacat.common.partition.parser.ASTEQ; +import com.netflix.metacat.common.partition.parser.ASTEVAL; +import com.netflix.metacat.common.partition.parser.ASTFILTER; +import com.netflix.metacat.common.partition.parser.ASTGT; +import com.netflix.metacat.common.partition.parser.ASTGTE; +import com.netflix.metacat.common.partition.parser.ASTLIKE; +import com.netflix.metacat.common.partition.parser.ASTLT; +import com.netflix.metacat.common.partition.parser.ASTLTE; +import com.netflix.metacat.common.partition.parser.ASTMATCHES; +import 
com.netflix.metacat.common.partition.parser.ASTNEQ; +import com.netflix.metacat.common.partition.parser.ASTNEVAL; +import com.netflix.metacat.common.partition.parser.ASTNUM; +import com.netflix.metacat.common.partition.parser.ASTOR; +import com.netflix.metacat.common.partition.parser.ASTSTRING; +import com.netflix.metacat.common.partition.parser.ASTVAR; +import com.netflix.metacat.common.partition.parser.PartitionParserVisitor; +import com.netflix.metacat.common.partition.parser.SimpleNode; +import com.netflix.metacat.common.partition.parser.Variable; + +import java.math.BigDecimal; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class PartitionParserEval implements PartitionParserVisitor { + public static final Pattern likePattern = Pattern.compile("(\\[%\\]|\\[_\\]|\\[\\[\\]|%|_)"); + public static final Map likeToRegexReplacements = new ImmutableMap.Builder() + .put("[%]", "%") + .put("[_]", "_") + .put("[[]", "[") + .put("%", ".*") + .put("_", ".").build(); + public enum Compare { + EQ("="), GT(">"), GTE(">="), LT("<"), LTE("<="), NEQ("!="), MATCHES("MATCHES"), LIKE("LIKE"); + String expression; + Compare(String expression) { + this.expression = expression; + } + public String getExpression(){ + return expression; + } + } + + private Map context; + + public PartitionParserEval() { + this(Maps.newHashMap()); + } + public PartitionParserEval(Map context) { + this.context = context; + } + + public Boolean eval(SimpleNode node, Object data) { + Object value1 = node.jjtGetChild(0).jjtAccept(this, data); + Compare comparison = (Compare) node.jjtGetChild(1).jjtAccept(this, data); + Object value2 = node.jjtGetChild(2).jjtAccept(this, data); + if (value2 instanceof String) { + return compare(comparison, value1.toString(), value2.toString()); + } + if (value2 instanceof BigDecimal) { + if (value1 instanceof String) { + value1 = new BigDecimal(value1.toString()); + } + return compare(comparison, (BigDecimal) value1, (BigDecimal) value2); + } + throw new RuntimeException("error processing partition filter"); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public Boolean compare(Compare comparison, Comparable value1, Comparable value2) { + if (value1 == null) { + switch (comparison) { + case EQ: + case LIKE: + case MATCHES: + return value2 == null; + case NEQ: + return value2 != null; + default: + return false; + } + } + if( comparison.equals(Compare.LIKE) || comparison.equals(Compare.MATCHES)){ + if( value2 != null){ + String value = value2.toString(); + if(comparison.equals(Compare.LIKE)){ + value = sqlLiketoRegexExpression(value); + } + return value1.toString().matches(value); + } + } else { + int compare = value1.compareTo(value2); + switch (comparison) { + case GT: + return compare > 0; + case GTE: + return compare >= 0; + case LT: + return compare < 0; + case LTE: + return compare <= 0; + case EQ: + return compare == 0; + case NEQ: + return compare != 0; + } + } + return false; + } + + //TODO: Need to escape regex meta characters + protected String sqlLiketoRegexExpression(String likeExpression) { + Matcher m = likePattern.matcher(likeExpression); + + StringBuffer builder = new StringBuffer(); + while(m.find()){ + m.appendReplacement(builder, likeToRegexReplacements.get(m.group())); + } + m.appendTail(builder); + return builder.toString(); + } + + @Override + public Object visit(ASTAND node, Object data) { + Boolean v1 = (Boolean) node.jjtGetChild(0).jjtAccept(this, data); + return v1 && (Boolean) node.jjtGetChild(1).jjtAccept(this, 
data); + } + + @Override + public Object visit(ASTEQ node, Object data) { + return Compare.EQ; + } + + @Override + public Object visit(ASTEVAL node, Object data) { + return eval(node, data); + } + + @Override + public Object visit(ASTFILTER node, Object data) { + return node.jjtGetChild(0).jjtAccept(this, data); + } + + @Override + public Object visit(ASTGT node, Object data) { + return Compare.GT; + } + + @Override + public Object visit(ASTGTE node, Object data) { + return Compare.GTE; + } + + @Override + public Object visit(ASTLT node, Object data) { + return Compare.LT; + } + + @Override + public Object visit(ASTLTE node, Object data) { + return Compare.LTE; + } + + @Override + public Object visit(ASTNEQ node, Object data) { + return Compare.NEQ; + } + + @Override + public Object visit(ASTMATCHES node, Object data) { + return Compare.MATCHES; + } + + @Override + public Object visit(ASTLIKE node, Object data) { + return Compare.LIKE; + } + + @Override + public Object visit(ASTNEVAL node, Object data) { + return !(Boolean) node.jjtGetChild(0).jjtAccept(this, data); + } + + @Override + public Object visit(ASTNUM node, Object data) { + return node.jjtGetValue(); + } + + @Override + public Object visit(ASTOR node, Object data) { + Boolean v1 = (Boolean) node.jjtGetChild(0).jjtAccept(this, data); + return v1 || (Boolean) node.jjtGetChild(1).jjtAccept(this, data); + } + + @Override + public Object visit(ASTSTRING node, Object data) { + return node.jjtGetValue(); + } + + @Override + public Object visit(ASTVAR node, Object data) { + if (!context.containsKey(((Variable)node.jjtGetValue()).getName())) { + throw new RuntimeException("Missing variable: " + ((Variable)node.jjtGetValue()).getName()); + } + return context.get(((Variable)node.jjtGetValue()).getName()); + } + + @Override + public Object visit(SimpleNode node, Object data) { + return null; + } + +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/ArchaiusConfigImpl.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/ArchaiusConfigImpl.java new file mode 100644 index 000000000..a38058ba5 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/ArchaiusConfigImpl.java @@ -0,0 +1,172 @@ +package com.netflix.metacat.common.server; + +import com.netflix.config.DynamicBooleanProperty; +import com.netflix.config.DynamicIntProperty; +import com.netflix.config.DynamicPropertyFactory; +import com.netflix.config.DynamicStringProperty; + +public class ArchaiusConfigImpl implements Config { + private final DynamicStringProperty defaultTypeConverter; + private final DynamicStringProperty elasticSearchClusterName; + private final DynamicStringProperty elasticSearchClusterNodes; + private final DynamicIntProperty elasticSearchClusterPort; + private final DynamicStringProperty elasticSearchRefreshExcludeDatabases; + private final DynamicStringProperty elasticSearchRefreshIncludeCatalogs; + private final DynamicStringProperty elasticSearchRefreshIncludeDatabases; + private final DynamicStringProperty elasticSearchRefreshPartitionsIncludeCatalogs; + private final DynamicIntProperty elasticSearchScrollFetchSize; + private final DynamicIntProperty elasticSearchScrollTimeout; + private final DynamicIntProperty elasticSearchThresholdUnmarkedDatabasesDelete; + private final DynamicIntProperty elasticSearchThresholdUnmarkedTablesDelete; + private final DynamicBooleanProperty epochInSeconds; + private final DynamicIntProperty eventBusExecutorThreadCount; + private final 
DynamicIntProperty eventBusThreadCount; + private final DynamicStringProperty hivePartitionWhitelistPattern; + private final DynamicStringProperty lookupServiceUserAdmin; + private final DynamicStringProperty pluginConfigLocation; + private final DynamicStringProperty tagServiceUserAdmin; + private final DynamicStringProperty metacatVersion; + private final DynamicBooleanProperty usePigTypes; + + public ArchaiusConfigImpl() { + this(DynamicPropertyFactory.getInstance()); + } + + public ArchaiusConfigImpl(DynamicPropertyFactory factory) { + this.defaultTypeConverter = factory + .getStringProperty("metacat.type.converter", "com.netflix.metacat.converters.impl.PrestoTypeConverter"); + this.elasticSearchClusterName = factory.getStringProperty("metacat.elacticsearch.cluster.name", null); + this.elasticSearchClusterNodes = factory.getStringProperty("metacat.elacticsearch.cluster.nodes", null); + this.elasticSearchClusterPort = factory.getIntProperty("metacat.elacticsearch.cluster.port", 7102); + this.elasticSearchRefreshExcludeDatabases = factory + .getStringProperty("metacat.elacticsearch.refresh.exclude.databases", null); + this.elasticSearchRefreshIncludeCatalogs = factory + .getStringProperty("metacat.elacticsearch.refresh.include.catalogs", null); + this.elasticSearchRefreshIncludeDatabases = factory + .getStringProperty("metacat.elacticsearch.refresh.include.databases", null); + this.elasticSearchRefreshPartitionsIncludeCatalogs = factory + .getStringProperty("metacat.elacticsearch.refresh.partitions.include.catalogs", + "prodhive,testhive,s3,aegisthus"); + this.elasticSearchScrollFetchSize = factory.getIntProperty("metacat.elacticsearch.scroll.fetch.size", 500); + this.elasticSearchScrollTimeout = factory.getIntProperty("metacat.elacticsearch.scroll.timeout.ms", 60000); + this.elasticSearchThresholdUnmarkedDatabasesDelete = factory + .getIntProperty("metacat.elacticsearch.refresh.threshold.unmarked.databases.delete", 100); + this.elasticSearchThresholdUnmarkedTablesDelete = factory + .getIntProperty("metacat.elacticsearch.refresh.threshold.unmarked.tables.delete", 1000); + this.epochInSeconds = factory.getBooleanProperty("metacat.type.epoch_in_seconds", true); + this.eventBusExecutorThreadCount = factory.getIntProperty("metacat.event.bus.executor.thread.count", 10); + this.eventBusThreadCount = factory.getIntProperty("metacat.event.thread.count", 10); + this.hivePartitionWhitelistPattern = factory + .getStringProperty("metacat.hive.metastore.partition.name.whitelist.pattern", ""); + this.lookupServiceUserAdmin = factory.getStringProperty("metacat.lookup_service.user_admin", "admin"); + this.metacatVersion = factory.getStringProperty("netflix.appinfo.version", "1.0.0"); + this.pluginConfigLocation = factory.getStringProperty("metacat.plugin.config.location", null); + this.tagServiceUserAdmin = factory.getStringProperty("metacat.tag_service.user_admin", "admin"); + this.usePigTypes = factory.getBooleanProperty("metacat.franklin.connector.use.pig.type", true); + } + + @Override + public String getDefaultTypeConverter() { + return defaultTypeConverter.get(); + } + + @Override + public String getElasticSearchClusterName() { + return elasticSearchClusterName.get(); + } + + @Override + public String getElasticSearchClusterNodes() { + return elasticSearchClusterNodes.get(); + } + + @Override + public int getElasticSearchClusterPort() { + return elasticSearchClusterPort.get(); + } + + @Override + public String getElasticSearchRefreshExcludeDatabases() { + return 
elasticSearchRefreshExcludeDatabases.get(); + } + + @Override + public String getElasticSearchRefreshIncludeCatalogs() { + return elasticSearchRefreshIncludeCatalogs.get(); + } + + @Override + public String getElasticSearchRefreshIncludeDatabases() { + return elasticSearchRefreshIncludeDatabases.get(); + } + + @Override + public String getElasticSearchRefreshPartitionsIncludeCatalogs() { + return elasticSearchRefreshPartitionsIncludeCatalogs.get(); + } + + @Override + public int getElasticSearchScrollFetchSize() { + return elasticSearchScrollFetchSize.get(); + } + + @Override + public int getElasticSearchScrollTimeout() { + return elasticSearchScrollTimeout.get(); + } + + @Override + public int getElasticSearchThresholdUnmarkedDatabasesDelete() { + return elasticSearchThresholdUnmarkedDatabasesDelete.get(); + } + + @Override + public int getElasticSearchThresholdUnmarkedTablesDelete() { + return elasticSearchThresholdUnmarkedTablesDelete.get(); + } + + @Override + public int getEventBusExecutorThreadCount() { + return eventBusExecutorThreadCount.get(); + } + + @Override + public int getEventBusThreadCount() { + return eventBusThreadCount.get(); + } + + @Override + public String getHivePartitionWhitelistPattern() { + return hivePartitionWhitelistPattern.get(); + } + + @Override + public String getLookupServiceUserAdmin() { + return lookupServiceUserAdmin.get(); + } + + @Override + public String getMetacatVersion() { + return metacatVersion.get(); + } + + @Override + public String getPluginConfigLocation() { + return pluginConfigLocation.get(); + } + + @Override + public String getTagServiceUserAdmin() { + return tagServiceUserAdmin.get(); + } + + @Override + public boolean isEpochInSeconds() { + return epochInSeconds.get(); + } + + @Override + public boolean isUsePigTypes() { + return usePigTypes.get(); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/CommonModule.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/CommonModule.java new file mode 100644 index 000000000..dd42322c4 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/CommonModule.java @@ -0,0 +1,48 @@ +package com.netflix.metacat.common.server; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.inject.AbstractModule; +import com.google.inject.TypeLiteral; +import com.google.inject.matcher.Matchers; +import com.google.inject.spi.InjectionListener; +import com.google.inject.spi.TypeEncounter; +import com.google.inject.spi.TypeListener; +import com.netflix.metacat.common.json.MetacatJson; +import com.netflix.metacat.common.json.MetacatJsonLocator; +import com.netflix.metacat.common.model.Lookup; +import com.netflix.metacat.common.model.TagItem; +import com.netflix.metacat.common.server.events.DeadEventHandler; +import com.netflix.metacat.common.server.events.MetacatEventBus; +import com.netflix.metacat.common.util.DataSourceManager; + +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; + +public class CommonModule extends AbstractModule { + @Override + protected void configure() { + Config config = new ArchaiusConfigImpl(); + + bind(Config.class).toInstance(config); + bind(MetacatJson.class).toInstance(MetacatJsonLocator.INSTANCE); + bind(DeadEventHandler.class).asEagerSingleton(); + bind(DataSourceManager.class).toInstance(DataSourceManager.get()); + MetacatEventBus eventBus = createMetacatEventBus(config); + bind(MetacatEventBus.class).toInstance(eventBus); + 
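+ // Register every Guice-injected instance with the event bus as it is constructed,
+ // so objects with @Subscribe methods receive events without registering themselves.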
bindListener(Matchers.any(), new TypeListener() { + public void hear(TypeLiteral typeLiteral, TypeEncounter typeEncounter) { + typeEncounter.register((InjectionListener) eventBus::register); + } + }); + + // Injecting statics is a bad pattern and should be avoided, but I am doing it as a first step to allow + // us to remove the hard coded username. + binder().requestStaticInjection(Lookup.class, TagItem.class); + } + + protected MetacatEventBus createMetacatEventBus(Config config) { + ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("metacat-event-pool-%d").build(); + int threadCount = config.getEventBusThreadCount(); + return new MetacatEventBus("metacat-event-bus", Executors.newFixedThreadPool(threadCount, threadFactory)); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/Config.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/Config.java new file mode 100644 index 000000000..2a4523f90 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/Config.java @@ -0,0 +1,25 @@ +package com.netflix.metacat.common.server; + +public interface Config { + String getDefaultTypeConverter(); + String getElasticSearchClusterName(); + String getElasticSearchClusterNodes(); + int getElasticSearchClusterPort(); + int getElasticSearchScrollFetchSize(); + int getElasticSearchScrollTimeout(); + String getElasticSearchRefreshExcludeDatabases(); + String getElasticSearchRefreshIncludeCatalogs(); + String getElasticSearchRefreshIncludeDatabases(); + String getElasticSearchRefreshPartitionsIncludeCatalogs(); + int getElasticSearchThresholdUnmarkedDatabasesDelete(); + int getElasticSearchThresholdUnmarkedTablesDelete(); + int getEventBusExecutorThreadCount(); + int getEventBusThreadCount(); + String getHivePartitionWhitelistPattern(); + String getLookupServiceUserAdmin(); + String getMetacatVersion(); + String getPluginConfigLocation(); + String getTagServiceUserAdmin(); + boolean isEpochInSeconds(); + boolean isUsePigTypes(); +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/DeadEventHandler.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/DeadEventHandler.java new file mode 100644 index 000000000..5fb8c4306 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/DeadEventHandler.java @@ -0,0 +1,22 @@ +package com.netflix.metacat.common.server.events; + +import com.google.common.eventbus.AllowConcurrentEvents; +import com.google.common.eventbus.DeadEvent; +import com.google.common.eventbus.Subscribe; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Singleton; + +@Singleton +public class DeadEventHandler { + private static final Logger log = LoggerFactory.getLogger(DeadEventHandler.class); + + @Subscribe + @AllowConcurrentEvents + public void logDeadEvent(DeadEvent event) { + Object sourceEvent = event.getEvent(); + Object source = event.getSource(); + log.debug("Unhandled event: {} from source: {}", sourceEvent, source); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePostEvent.java new file mode 100644 index 000000000..d636e8987 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePostEvent.java 
@@ -0,0 +1,38 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.dto.DatabaseDto; + +import java.util.Objects; + +public class MetacatCreateDatabasePostEvent extends MetacatEvent { + private final DatabaseDto dto; + + public MetacatCreateDatabasePostEvent(DatabaseDto dto, MetacatContext metacatContext) { + super( dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatCreateDatabasePostEvent)) return false; + if (!super.equals(o)) return false; + MetacatCreateDatabasePostEvent that = (MetacatCreateDatabasePostEvent) o; + return Objects.equals(dto, that.dto); + } + + public DatabaseDto getDto() { + return dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatCreateDatabasePostEvent{" + "dto=" + dto + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePreEvent.java new file mode 100644 index 000000000..936d6066b --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateDatabasePreEvent.java @@ -0,0 +1,10 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatCreateDatabasePreEvent extends MetacatEvent { + public MetacatCreateDatabasePreEvent(QualifiedName name, MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPostEvent.java new file mode 100644 index 000000000..4b69191cb --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPostEvent.java @@ -0,0 +1,56 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatCreateMViewPostEvent extends MetacatEvent { + private final TableDto dto; + private final String filter; + private final Boolean snapshot; + + public MetacatCreateMViewPostEvent(TableDto dto, Boolean snapshot, String filter, MetacatContext metacatContext) { + super( dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + this.snapshot = snapshot; + this.filter = filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatCreateMViewPostEvent)) return false; + if (!super.equals(o)) return false; + MetacatCreateMViewPostEvent that = (MetacatCreateMViewPostEvent) o; + return Objects.equals(dto, that.dto) && + Objects.equals(snapshot, that.snapshot) && + Objects.equals(filter, that.filter); + } + + public TableDto getDto() { + return dto; + } + + public String getFilter() { + return filter; + } + + public Boolean getSnapshot() { + return snapshot; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto, snapshot, filter); + } + + @Override + public String toString() 
{ + return "MetacatCreateMViewPostEvent{" + + "dto=" + dto + + ", snapshot=" + snapshot + + ", filter='" + filter + '\'' + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPreEvent.java new file mode 100644 index 000000000..849107978 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateMViewPreEvent.java @@ -0,0 +1,48 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +import java.util.Objects; + +public class MetacatCreateMViewPreEvent extends MetacatEvent { + private final String filter; + private final Boolean snapshot; + + public MetacatCreateMViewPreEvent(QualifiedName name, Boolean snapshot, String filter, MetacatContext metacatContext) { + super( name, metacatContext); + this.snapshot = snapshot; + this.filter = filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatCreateMViewPreEvent)) return false; + if (!super.equals(o)) return false; + MetacatCreateMViewPreEvent that = (MetacatCreateMViewPreEvent) o; + return Objects.equals(snapshot, that.snapshot) && + Objects.equals(filter, that.filter); + } + + public String getFilter() { + return filter; + } + + public Boolean getSnapshot() { + return snapshot; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(snapshot, filter); + } + + @Override + public String toString() { + return "MetacatCreateMViewPreEvent{" + + ", snapshot=" + snapshot + + ", filter='" + filter + '\'' + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePostEvent.java new file mode 100644 index 000000000..bfc18aab9 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePostEvent.java @@ -0,0 +1,39 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatCreateTablePostEvent extends MetacatEvent { + private final TableDto dto; + + public MetacatCreateTablePostEvent(TableDto dto, MetacatContext metacatContext) { + super( dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatCreateTablePostEvent)) return false; + if (!super.equals(o)) return false; + MetacatCreateTablePostEvent that = (MetacatCreateTablePostEvent) o; + return Objects.equals(dto, that.dto); + } + + public TableDto getDto() { + + return dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatCreateTablePostEvent{dto=" + dto + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePreEvent.java new file mode 100644 index 000000000..87782adf7 --- /dev/null +++ 
b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatCreateTablePreEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatCreateTablePreEvent extends MetacatEvent { + + public MetacatCreateTablePreEvent(QualifiedName name, MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePostEvent.java new file mode 100644 index 000000000..d8ff5fb9e --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePostEvent.java @@ -0,0 +1,39 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.dto.DatabaseDto; + +import java.util.Objects; + +public class MetacatDeleteDatabasePostEvent extends MetacatEvent { + private final DatabaseDto dto; + + public MetacatDeleteDatabasePostEvent(DatabaseDto dto, MetacatContext metacatContext) { + super( dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatDeleteDatabasePostEvent)) return false; + if (!super.equals(o)) return false; + MetacatDeleteDatabasePostEvent that = (MetacatDeleteDatabasePostEvent) o; + return Objects.equals(dto, that.dto); + } + + public DatabaseDto getDto() { + + return dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatDeleteDatabasePostEvent{dto=" + dto + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePreEvent.java new file mode 100644 index 000000000..0fcdb686f --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteDatabasePreEvent.java @@ -0,0 +1,39 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.dto.DatabaseDto; + +import java.util.Objects; + +public class MetacatDeleteDatabasePreEvent extends MetacatEvent { + private final DatabaseDto dto; + + public MetacatDeleteDatabasePreEvent(DatabaseDto dto, MetacatContext metacatContext) { + super( dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatDeleteDatabasePreEvent)) return false; + if (!super.equals(o)) return false; + MetacatDeleteDatabasePreEvent that = (MetacatDeleteDatabasePreEvent) o; + return Objects.equals(dto, that.dto); + } + + public DatabaseDto getDto() { + + return dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatDeleteDatabasePreEvent{dto=" + dto + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPostEvent.java 
b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPostEvent.java
new file mode 100644
index 000000000..08dc7f92b
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPostEvent.java
@@ -0,0 +1,41 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+
+import java.util.List;
+import java.util.Objects;
+
+public class MetacatDeleteMViewPartitionPostEvent extends MetacatEvent {
+    private final List<String> partitionIds;
+
+    public MetacatDeleteMViewPartitionPostEvent(QualifiedName name, List<String> partitionIds, MetacatContext metacatContext) {
+        super(name, metacatContext);
+        this.partitionIds = partitionIds;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatDeleteMViewPartitionPostEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatDeleteMViewPartitionPostEvent that = (MetacatDeleteMViewPartitionPostEvent) o;
+        return Objects.equals(partitionIds, that.partitionIds);
+    }
+
+    public List<String> getPartitionIds() {
+        return partitionIds;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(partitionIds);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatDeleteMViewPartitionPostEvent{" +
+                "partitions=" + partitionIds +
+                '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPreEvent.java
new file mode 100644
index 000000000..da9f6c73d
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPartitionPreEvent.java
@@ -0,0 +1,41 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+
+import java.util.List;
+import java.util.Objects;
+
+public class MetacatDeleteMViewPartitionPreEvent extends MetacatEvent {
+    private final List<String> partitionIds;
+
+    public MetacatDeleteMViewPartitionPreEvent(QualifiedName name, List<String> partitionIds, MetacatContext metacatContext) {
+        super(name, metacatContext);
+        this.partitionIds = partitionIds;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatDeleteMViewPartitionPreEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatDeleteMViewPartitionPreEvent that = (MetacatDeleteMViewPartitionPreEvent) o;
+        return Objects.equals(partitionIds, that.partitionIds);
+    }
+
+    public List<String> getPartitionIds() {
+        return partitionIds;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(partitionIds);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatDeleteMViewPartitionPreEvent{" +
+                "partitions=" + partitionIds +
+                '}';
+    }
+}
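Illustrative sketch, not part of the patch: consuming these partition events is plain Guava event-bus subscription, since the MetacatEventBus introduced later in this change extends AsyncEventBus. The auditor class, executor, and log output below are hypothetical; @Subscribe, register and post are Guava APIs.

    import com.google.common.eventbus.Subscribe;
    import java.util.concurrent.Executors;

    public class PartitionDeleteAuditor {
        @Subscribe
        public void onMViewPartitionDelete(MetacatDeleteMViewPartitionPostEvent event) {
            // The event carries the view's QualifiedName and the deleted partition ids.
            System.out.printf("Deleted %d partition(s) of %s%n",
                    event.getPartitionIds().size(), event.getName());
        }
    }

    // Wiring (hypothetical executor and names):
    // MetacatEventBus bus = new MetacatEventBus("metacat", Executors.newFixedThreadPool(2));
    // bus.register(new PartitionDeleteAuditor());
    // bus.post(new MetacatDeleteMViewPartitionPostEvent(name, partitionIds, metacatContext));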
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPostEvent.java
new file mode 100644
index 000000000..e293d0750
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPostEvent.java
@@ -0,0 +1,38 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.dto.TableDto;
+
+import java.util.Objects;
+
+public class MetacatDeleteMViewPostEvent extends MetacatEvent {
+    private final TableDto dto;
+
+    public MetacatDeleteMViewPostEvent(TableDto dto, MetacatContext metacatContext) {
+        super(dto != null ? dto.getName() : null, metacatContext);
+        this.dto = dto;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatDeleteMViewPostEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatDeleteMViewPostEvent that = (MetacatDeleteMViewPostEvent) o;
+        return Objects.equals(dto, that.dto);
+    }
+
+    public TableDto getDto() {
+        return dto;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(dto);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatDeleteMViewPostEvent{dto=" + dto + '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPreEvent.java
new file mode 100644
index 000000000..5f49fbf25
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteMViewPreEvent.java
@@ -0,0 +1,11 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+
+public class MetacatDeleteMViewPreEvent extends MetacatEvent {
+
+    public MetacatDeleteMViewPreEvent(QualifiedName name, MetacatContext metacatContext) {
+        super(name, metacatContext);
+    }
+}
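Illustrative sketch, not part of the patch: the naming convention throughout this package is a Pre event posted with the QualifiedName before an operation runs, and a Post event posted with the resulting DTO afterwards. A minimal sketch of that call pattern, assuming a hypothetical service wrapper and delete helper:

    public class MViewServiceSketch {
        private final MetacatEventBus eventBus;

        public MViewServiceSketch(MetacatEventBus eventBus) {
            this.eventBus = eventBus;
        }

        public TableDto deleteMView(QualifiedName name, MetacatContext context) {
            eventBus.post(new MetacatDeleteMViewPreEvent(name, context));     // before the action
            TableDto deleted = doDelete(name);                                // hypothetical helper
            eventBus.post(new MetacatDeleteMViewPostEvent(deleted, context)); // after, with the DTO
            return deleted;
        }

        private TableDto doDelete(QualifiedName name) {
            return null; // elided; stands in for the real metadata-store call
        }
    }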
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPostEvent.java
new file mode 100644
index 000000000..99b819dee
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPostEvent.java
@@ -0,0 +1,41 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+
+import java.util.List;
+import java.util.Objects;
+
+public class MetacatDeleteTablePartitionPostEvent extends MetacatEvent {
+    private final List<String> partitionIds;
+
+    public MetacatDeleteTablePartitionPostEvent(QualifiedName name, List<String> partitionIds, MetacatContext metacatContext) {
+        super(name, metacatContext);
+        this.partitionIds = partitionIds;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatDeleteTablePartitionPostEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatDeleteTablePartitionPostEvent that = (MetacatDeleteTablePartitionPostEvent) o;
+        return Objects.equals(partitionIds, that.partitionIds);
+    }
+
+    public List<String> getPartitionIds() {
+        return partitionIds;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(partitionIds);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatDeleteTablePartitionPostEvent{" +
+                "partitions=" + partitionIds +
+                '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPreEvent.java
new file mode 100644
index 000000000..40460a579
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePartitionPreEvent.java
@@ -0,0 +1,41 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+
+import java.util.List;
+import java.util.Objects;
+
+public class MetacatDeleteTablePartitionPreEvent extends MetacatEvent {
+    private final List<String> partitionIds;
+
+    public MetacatDeleteTablePartitionPreEvent(QualifiedName name, List<String> partitionIds, MetacatContext metacatContext) {
+        super(name, metacatContext);
+        this.partitionIds = partitionIds;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatDeleteTablePartitionPreEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatDeleteTablePartitionPreEvent that = (MetacatDeleteTablePartitionPreEvent) o;
+        return Objects.equals(partitionIds, that.partitionIds);
+    }
+
+    public List<String> getPartitionIds() {
+        return partitionIds;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(partitionIds);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatDeleteTablePartitionPreEvent{" +
+                "partitions=" + partitionIds +
+                '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePostEvent.java
new file mode 100644
index 000000000..32664f0fe
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePostEvent.java
@@ -0,0 +1,39 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.dto.TableDto;
+
+import java.util.Objects;
+
+public class MetacatDeleteTablePostEvent extends MetacatEvent {
+    private final TableDto dto;
+
+    public MetacatDeleteTablePostEvent(TableDto dto, MetacatContext metacatContext) {
+        super(dto != null ? dto.getName() : null, metacatContext);
+        this.dto = dto;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatDeleteTablePostEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatDeleteTablePostEvent that = (MetacatDeleteTablePostEvent) o;
+        return Objects.equals(dto, that.dto);
+    }
+
+    public TableDto getDto() {
+
+        return dto;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(dto);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatDeleteTablePostEvent{dto=" + dto + '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePreEvent.java
new file mode 100644
index 000000000..034ef8a5d
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatDeleteTablePreEvent.java
@@ -0,0 +1,11 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+
+public class MetacatDeleteTablePreEvent extends MetacatEvent {
+
+    public
MetacatDeleteTablePreEvent(QualifiedName name, MetacatContext metacatContext) { + super( name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEvent.java new file mode 100644 index 000000000..5776eea61 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEvent.java @@ -0,0 +1,42 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +import java.util.Objects; + +public class MetacatEvent { + private final QualifiedName name; + private final MetacatContext metacatContext; + + public MetacatEvent(QualifiedName name, MetacatContext metacatContext) { + this.name = name; + this.metacatContext = metacatContext; + } + + public QualifiedName getName() { + return name; + } + + public MetacatContext getMetacatContext() { + return metacatContext; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatEvent)) return false; + MetacatEvent that = (MetacatEvent) o; + return Objects.equals(name, that.name) && Objects.equals(metacatContext, that.metacatContext); + } + + @Override + public int hashCode() { + return Objects.hash(name, metacatContext); + } + + @Override + public String toString() { + return "MetacatEvent{" + "name=" + name + ", metacatContext=" + metacatContext + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEventBus.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEventBus.java new file mode 100644 index 000000000..547af2897 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatEventBus.java @@ -0,0 +1,21 @@ +package com.netflix.metacat.common.server.events; + +import com.google.common.eventbus.AsyncEventBus; +import com.google.common.eventbus.SubscriberExceptionHandler; + +import java.util.concurrent.Executor; + +public class MetacatEventBus extends AsyncEventBus { + public MetacatEventBus(String identifier, Executor executor) { + super(identifier, executor); + } + + public MetacatEventBus(Executor executor, + SubscriberExceptionHandler subscriberExceptionHandler) { + super(executor, subscriberExceptionHandler); + } + + public MetacatEventBus(Executor executor) { + super(executor); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePostEvent.java new file mode 100644 index 000000000..3edafed80 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePostEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatRenameDatabasePostEvent extends MetacatEvent { + public MetacatRenameDatabasePostEvent(QualifiedName name, + MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePreEvent.java 
b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePreEvent.java new file mode 100644 index 000000000..842886005 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameDatabasePreEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatRenameDatabasePreEvent extends MetacatEvent { + public MetacatRenameDatabasePreEvent(QualifiedName name, + MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPostEvent.java new file mode 100644 index 000000000..ebb0258a9 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPostEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatRenameMViewPostEvent extends MetacatEvent { + public MetacatRenameMViewPostEvent(QualifiedName name, + MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPreEvent.java new file mode 100644 index 000000000..812bad79f --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameMViewPreEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatRenameMViewPreEvent extends MetacatEvent { + public MetacatRenameMViewPreEvent(QualifiedName name, + MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePostEvent.java new file mode 100644 index 000000000..0c1822fd4 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePostEvent.java @@ -0,0 +1,42 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatRenameTablePostEvent extends MetacatEvent { + private final TableDto dto; + + public MetacatRenameTablePostEvent(QualifiedName oldName, TableDto dto, MetacatContext metacatContext) { + super(oldName, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatRenameTablePostEvent)) return false; + if (!super.equals(o)) return false; + MetacatRenameTablePostEvent that = (MetacatRenameTablePostEvent) o; + return Objects.equals(dto, that.dto); + } + + public TableDto getDto() { + + return dto; + } + + @Override + public int hashCode() { + return 31 * 
super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatRenameTablePostEvent{" + + "dto=" + dto + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePreEvent.java new file mode 100644 index 000000000..a2b7a5244 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatRenameTablePreEvent.java @@ -0,0 +1,41 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +import java.util.Objects; + +public class MetacatRenameTablePreEvent extends MetacatEvent { + private final QualifiedName newName; + + public MetacatRenameTablePreEvent(QualifiedName newName, QualifiedName oldName, MetacatContext metacatContext) { + super(oldName, metacatContext); + this.newName = newName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatRenameTablePreEvent)) return false; + if (!super.equals(o)) return false; + MetacatRenameTablePreEvent that = (MetacatRenameTablePreEvent) o; + return Objects.equals(newName, that.newName); + } + + public QualifiedName getNewName() { + + return newName; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(newName); + } + + @Override + public String toString() { + return "MetacatRenameTablePreEvent{" + + "newName=" + newName + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPostEvent.java new file mode 100644 index 000000000..94a7de89d --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPostEvent.java @@ -0,0 +1,42 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.PartitionDto; + +import java.util.List; +import java.util.Objects; + +public class MetacatSaveMViewPartitionPostEvent extends MetacatEvent { + private final List partitions; + + public MetacatSaveMViewPartitionPostEvent(QualifiedName name, List partitions, MetacatContext metacatContext) { + super( name, metacatContext); + this.partitions = partitions; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatSaveMViewPartitionPostEvent)) return false; + if (!super.equals(o)) return false; + MetacatSaveMViewPartitionPostEvent that = (MetacatSaveMViewPartitionPostEvent) o; + return Objects.equals(partitions, that.partitions); + } + + public List getPartitions() { + return partitions; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(partitions); + } + + @Override + public String toString() { + return "MetacatSaveMViewPartitionPostEvent{" + + "partitions=" + partitions + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPreEvent.java new file mode 100644 index 
000000000..87b993988
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveMViewPartitionPreEvent.java
@@ -0,0 +1,42 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto;
+
+import java.util.List;
+import java.util.Objects;
+
+public class MetacatSaveMViewPartitionPreEvent extends MetacatEvent {
+    private final List<PartitionDto> partitions;
+
+    public MetacatSaveMViewPartitionPreEvent(QualifiedName name, List<PartitionDto> partitions, MetacatContext metacatContext) {
+        super(name, metacatContext);
+        this.partitions = partitions;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatSaveMViewPartitionPreEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatSaveMViewPartitionPreEvent that = (MetacatSaveMViewPartitionPreEvent) o;
+        return Objects.equals(partitions, that.partitions);
+    }
+
+    public List<PartitionDto> getPartitions() {
+        return partitions;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(partitions);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatSaveMViewPartitionPreEvent{" +
+                "partitions=" + partitions +
+                '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPostEvent.java
new file mode 100644
index 000000000..3f6326910
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPostEvent.java
@@ -0,0 +1,42 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto;
+
+import java.util.List;
+import java.util.Objects;
+
+public class MetacatSaveTablePartitionPostEvent extends MetacatEvent {
+    private final List<PartitionDto> partitions;
+
+    public MetacatSaveTablePartitionPostEvent(QualifiedName name, List<PartitionDto> partitions, MetacatContext metacatContext) {
+        super(name, metacatContext);
+        this.partitions = partitions;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof MetacatSaveTablePartitionPostEvent)) return false;
+        if (!super.equals(o)) return false;
+        MetacatSaveTablePartitionPostEvent that = (MetacatSaveTablePartitionPostEvent) o;
+        return Objects.equals(partitions, that.partitions);
+    }
+
+    public List<PartitionDto> getPartitions() {
+        return partitions;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Objects.hash(partitions);
+    }
+
+    @Override
+    public String toString() {
+        return "MetacatSaveTablePartitionPostEvent{" +
+                "partitions=" + partitions +
+                '}';
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPreEvent.java
new file mode 100644
index 000000000..2571bf633
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatSaveTablePartitionPreEvent.java
@@ -0,0 +1,42 @@
+package com.netflix.metacat.common.server.events;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto; + +import java.util.List; +import java.util.Objects; + +public class MetacatSaveTablePartitionPreEvent extends MetacatEvent { + private final List partitions; + + public MetacatSaveTablePartitionPreEvent(QualifiedName name, List partitions, MetacatContext metacatContext) { + super(name, metacatContext); + this.partitions = partitions; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatSaveTablePartitionPreEvent)) return false; + if (!super.equals(o)) return false; + MetacatSaveTablePartitionPreEvent that = (MetacatSaveTablePartitionPreEvent) o; + return Objects.equals(partitions, that.partitions); + } + + public List getPartitions() { + return partitions; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(partitions); + } + + @Override + public String toString() { + return "MetacatSaveTablePartitionPreEvent{" + + "partitions=" + partitions + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePostEvent.java new file mode 100644 index 000000000..3813a1a32 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePostEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatUpdateDatabasePostEvent extends MetacatEvent { + public MetacatUpdateDatabasePostEvent(QualifiedName name, + MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePreEvent.java new file mode 100644 index 000000000..5a3db295c --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateDatabasePreEvent.java @@ -0,0 +1,11 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; + +public class MetacatUpdateDatabasePreEvent extends MetacatEvent { + public MetacatUpdateDatabasePreEvent(QualifiedName name, + MetacatContext metacatContext) { + super(name, metacatContext); + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPostEvent.java new file mode 100644 index 000000000..65f7fd9ed --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPostEvent.java @@ -0,0 +1,39 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatUpdateMViewPostEvent extends MetacatEvent { + private final TableDto dto; + + public MetacatUpdateMViewPostEvent(TableDto dto, MetacatContext metacatContext) { + super( dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return 
true; + if (!(o instanceof MetacatUpdateMViewPostEvent)) return false; + if (!super.equals(o)) return false; + MetacatUpdateMViewPostEvent that = (MetacatUpdateMViewPostEvent) o; + return Objects.equals(dto, that.dto); + } + + public TableDto getDto() { + + return dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatUpdateMViewPostEvent{dto=" + dto + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPreEvent.java new file mode 100644 index 000000000..09e03a56f --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateMViewPreEvent.java @@ -0,0 +1,42 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatUpdateMViewPreEvent extends MetacatEvent { + private final TableDto dto; + + public MetacatUpdateMViewPreEvent(QualifiedName name, TableDto dto, MetacatContext metacatContext) { + super(name, metacatContext); + this.dto = dto; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatUpdateMViewPreEvent)) return false; + if (!super.equals(o)) return false; + MetacatUpdateMViewPreEvent that = (MetacatUpdateMViewPreEvent) o; + return Objects.equals(dto, that.dto); + } + + public TableDto getDto() { + + return dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return "MetacatUpdateMViewPreEvent{" + + "dto=" + dto + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePostEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePostEvent.java new file mode 100644 index 000000000..2bbb423c2 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePostEvent.java @@ -0,0 +1,47 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatUpdateTablePostEvent extends MetacatEvent { + private TableDto dto; + + public MetacatUpdateTablePostEvent(TableDto dto, MetacatContext metacatContext) { + super(dto!=null?dto.getName():null, metacatContext); + this.dto = dto; + } + + public MetacatUpdateTablePostEvent(QualifiedName name, MetacatContext metacatContext) { + super(name, metacatContext); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatUpdateTablePostEvent)) return false; + if (!super.equals(o)) return false; + MetacatUpdateTablePostEvent that = (MetacatUpdateTablePostEvent) o; + return Objects.equals(dto, that.dto); + } + + public TableDto getDto() { + return dto; + } + + public void setDto(TableDto dto) { + this.dto = dto; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash(dto); + } + + @Override + public String toString() { + return 
"MetacatUpdateTablePostEvent{dto=" + dto + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePreEvent.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePreEvent.java new file mode 100644 index 000000000..a20d017b7 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/server/events/MetacatUpdateTablePreEvent.java @@ -0,0 +1,42 @@ +package com.netflix.metacat.common.server.events; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.TableDto; + +import java.util.Objects; + +public class MetacatUpdateTablePreEvent extends MetacatEvent { + private final TableDto table; + + public MetacatUpdateTablePreEvent(QualifiedName name, TableDto table, MetacatContext metacatContext) { + super(name, metacatContext); + this.table = table; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof MetacatUpdateTablePreEvent)) return false; + if (!super.equals(o)) return false; + MetacatUpdateTablePreEvent that = (MetacatUpdateTablePreEvent) o; + return Objects.equals(table, that.table); + } + + public TableDto getTable() { + + return table; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hash( table); + } + + @Override + public String toString() { + return "MetacatUpdateTablePreEvent{" + + ", table=" + table + + '}'; + } +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/BaseUserMetadataService.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/BaseUserMetadataService.java new file mode 100644 index 000000000..385535e42 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/BaseUserMetadataService.java @@ -0,0 +1,67 @@ +package com.netflix.metacat.common.usermetadata; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.dto.HasDataMetadata; +import com.netflix.metacat.common.dto.HasDefinitionMetadata; +import com.netflix.metacat.common.dto.HasMetadata; + +import java.util.Optional; + +/** + * Created by amajumdar on 4/13/15. 
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/BaseUserMetadataService.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/BaseUserMetadataService.java
new file mode 100644
index 000000000..385535e42
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/BaseUserMetadataService.java
@@ -0,0 +1,67 @@
+package com.netflix.metacat.common.usermetadata;
+
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.netflix.metacat.common.dto.HasDataMetadata;
+import com.netflix.metacat.common.dto.HasDefinitionMetadata;
+import com.netflix.metacat.common.dto.HasMetadata;
+
+import java.util.Optional;
+
+/**
+ * Created by amajumdar on 4/13/15.
+ */
+public abstract class BaseUserMetadataService implements UserMetadataService {
+    public void saveMetadata(String userId, HasMetadata holder, boolean merge) {
+        if (holder instanceof HasDefinitionMetadata) {
+            HasDefinitionMetadata defDto = (HasDefinitionMetadata) holder;
+
+            // If the user is updating the definition metadata do a merge on the existing metadata
+            ObjectNode newMetadata = defDto.getDefinitionMetadata();
+            if (newMetadata != null) {
+                HasDefinitionMetadata definitionDto = (HasDefinitionMetadata) holder;
+                saveDefinitionMetadata(definitionDto.getDefinitionName(), userId, Optional.of(newMetadata), merge);
+            }
+        }
+
+        if (holder instanceof HasDataMetadata) {
+            HasDataMetadata dataDto = (HasDataMetadata) holder;
+
+            // If the user is updating the data metadata and a separate data location exists,
+            // do a merge on the existing metadata
+            ObjectNode newMetadata = dataDto.getDataMetadata();
+            if (newMetadata != null && dataDto.isDataExternal()) {
+                saveDataMetadata(dataDto.getDataUri(), userId, Optional.of(newMetadata), merge);
+            }
+        }
+    }
+
+    public void populateMetadata(HasMetadata holder) {
+        Optional<ObjectNode> metadata = Optional.empty();
+        if (holder instanceof HasDataMetadata) {
+            HasDataMetadata dataDto = (HasDataMetadata) holder;
+            if (dataDto.isDataExternal()) {
+                metadata = getDataMetadata(dataDto.getDataUri());
+            }
+        }
+        Optional<ObjectNode> definitionMetadata = Optional.empty();
+        if (holder instanceof HasDefinitionMetadata) {
+            HasDefinitionMetadata definitionDto = (HasDefinitionMetadata) holder;
+            definitionMetadata = getDefinitionMetadata(definitionDto.getDefinitionName());
+        }
+        populateMetadata(holder, definitionMetadata.orElse(null), metadata.orElse(null));
+    }
+
+    public void populateMetadata(HasMetadata holder, ObjectNode definitionMetadata, ObjectNode dataMetadata) {
+        if (holder instanceof HasDefinitionMetadata) {
+            HasDefinitionMetadata defDto = (HasDefinitionMetadata) holder;
+            defDto.setDefinitionMetadata(definitionMetadata);
+        }
+
+        if (holder instanceof HasDataMetadata) {
+            HasDataMetadata dataDto = (HasDataMetadata) holder;
+            if (dataDto.isDataExternal()) {
+                dataDto.setDataMetadata(dataMetadata);
+            }
+        }
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/LookupService.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/LookupService.java
new file mode 100644
index 000000000..0efbbc8ae
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/LookupService.java
@@ -0,0 +1,56 @@
+package com.netflix.metacat.common.usermetadata;
+
+import com.netflix.metacat.common.model.Lookup;
+
+import java.util.Set;
+
+/**
+ * Created by amajumdar on 7/6/15.
+ */ +public interface LookupService { + /** + * Returns the lookup for the given name + * @param name lookup name + * @return lookup + */ + Lookup get(String name); + /** + * Returns the value of the lookup name + * @param name lookup name + * @return scalar lookup value + */ + String getValue(String name); + /** + * Returns the list of values of the lookup name + * @param name lookup name + * @return list of lookup values + */ + Set getValues(String name); + /** + * Returns the list of values of the lookup name + * @param lookupId lookup id + * @return list of lookup values + */ + Set getValues(Long lookupId); + /** + * Saves the lookup value + * @param name lookup name + * @param values multiple values + * @return + */ + Lookup setValues(String name, Set values); + /** + * Saves the lookup value + * @param name lookup name + * @param values multiple values + * @return + */ + Lookup addValues(String name, Set values); + /** + * Saves the lookup value + * @param name lookup name + * @param value lookup value + * @return + */ + Lookup setValue(String name, String value); +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/TagService.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/TagService.java new file mode 100644 index 000000000..9d0d71935 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/TagService.java @@ -0,0 +1,87 @@ +package com.netflix.metacat.common.usermetadata; + +import com.netflix.metacat.common.QualifiedName; + +import java.util.List; +import java.util.Set; + +/** + * Created by amajumdar on 6/29/15. + */ +public interface TagService { + /** + * Returns the list of tags + * @return list of tag names + */ + Set getTags(); + + /** + * Returns the list of QualifiedName of items that are tagged by the given includeTags and + * do not contain the given excludeTags + * @param includeTags include items that contain tags + * @param excludeTags include items that do not contain tags + * @param sourceName catalog/source name + * @param databaseName database name + * @param tableName table name + * @return list of qualified names of the items + */ + List list( + Set includeTags, + Set excludeTags, + String sourceName, + String databaseName, + String tableName); + + /** + * Returns the list of QualifiedName of items that have tags containing the given tag text. + * @param tag partial text of a tag + * @param sourceName source/catalog name + * @param databaseName database name + * @param tableName table name + * @return list of qualified names of the items + */ + List search( + String tag, + String sourceName, + String databaseName, + String tableName); + + /** + * Tags the given table with the given tags + * @param qualifiedName, table name + * @param tags list of tags + * @return return the complete list of tags associated with the table + */ + Set setTableTags( + QualifiedName qualifiedName, + Set tags, + boolean updateUserMetadata); + + /** + * Removes the tags from the given table + * @param qualifiedName table name + * @param deleteAll if true, will delete all tags associated with the given table + * @param tags list of tags to be removed for the given table + */ + Void removeTableTags( + QualifiedName qualifiedName, + Boolean deleteAll, + Set tags, + boolean updateUserMetadata); + + /** + * Delete the tag item along with its associated tags. 
+ * @param name table name + * @return null + */ + Void delete(QualifiedName name, + boolean updateUserMetadata); + + /** + * Renames the tag item name with the new table name + * @param name table qualified name + * @param newTableName new table name + * @return null + */ + Void rename(QualifiedName name, String newTableName); +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/UserMetadataService.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/UserMetadataService.java new file mode 100644 index 000000000..fcb87463f --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/UserMetadataService.java @@ -0,0 +1,60 @@ +package com.netflix.metacat.common.usermetadata; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.DefinitionMetadataDto; +import com.netflix.metacat.common.dto.HasMetadata; + +import javax.annotation.Nonnull; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +public interface UserMetadataService { + String METACAT_USERMETADATA_CONFIG_LOCATION = "metacat.usermetadata.config.location"; + + void deleteDataMetadatas(@Nonnull List uris); + + void deleteDefinitionMetadatas(@Nonnull List names); + + void deleteMetadatas(List holders, boolean force); + + @Nonnull + Optional getDataMetadata(@Nonnull String uri); + + @Nonnull + Map getDataMetadataMap(@Nonnull List uris); + + @Nonnull + Optional getDefinitionMetadata(@Nonnull QualifiedName name); + + @Nonnull + Map getDefinitionMetadataMap(@Nonnull List names); + + void saveDataMetadata(@Nonnull String uri, @Nonnull String userId, @Nonnull Optional metadata, boolean merge); + + void saveDefinitionMetadata(@Nonnull QualifiedName name, @Nonnull String userId, + @Nonnull Optional metadata, boolean merge); + + void saveMetadata(String userId, HasMetadata holder, boolean merge); + + void populateMetadata(HasMetadata holder); + + void populateMetadata(HasMetadata holder, ObjectNode definitionMetadata, ObjectNode dataMetadata); + + int renameDataMetadataKey(@Nonnull String oldUri, @Nonnull String newUri); + + int renameDefinitionMetadataKey(@Nonnull QualifiedName oldName, @Nonnull QualifiedName newName); + + void start() throws Exception; + + void stop() throws Exception; + + void saveMetadatas(String user, List holders, boolean merge); + + List searchDefinitionMetadatas(Set propertyNames, String type, String name + , String sortBy, String sortOrder, Integer offset, Integer limit); + + List searchByOwners(Set owners); +} diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/UserMetadataServiceException.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/UserMetadataServiceException.java new file mode 100644 index 000000000..1543be754 --- /dev/null +++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/usermetadata/UserMetadataServiceException.java @@ -0,0 +1,12 @@ +package com.netflix.metacat.common.usermetadata; + +import java.sql.SQLException; + +/** + * Created by amajumdar on 3/16/16. 
+ */
+public class UserMetadataServiceException extends RuntimeException {
+    public UserMetadataServiceException(String m, Exception e) {
+        super(m, e);
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/util/DataSourceManager.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/util/DataSourceManager.java
new file mode 100644
index 000000000..e56b22d8d
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/util/DataSourceManager.java
@@ -0,0 +1,84 @@
+package com.netflix.metacat.common.util;
+
+import com.google.common.collect.Maps;
+import org.apache.tomcat.jdbc.pool.DataSourceFactory;
+import org.apache.tomcat.jdbc.pool.DataSourceProxy;
+
+import javax.annotation.PreDestroy;
+import javax.sql.DataSource;
+import java.sql.Driver;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Created by amajumdar on 4/7/15.
+ */
+public class DataSourceManager {
+    private static final String JDO_PREFIX = "javax.jdo.option.";
+    private Map<String, DataSource> dataSources = Maps.newConcurrentMap();
+    private static DataSourceManager instance = new DataSourceManager();
+
+    private DataSourceManager() {}
+    //
+    // This method has been provided so that it can be used in the connectors. We could have injected into the plugins.
+    //
+    public static DataSourceManager get() {
+        return instance;
+    }
+
+    public DataSourceManager load(String catalogName, Map properties) {
+        if (dataSources.get(catalogName) == null) {
+            createDataSource(catalogName, properties);
+        }
+        return this;
+    }
+
+    public DataSourceManager load(String catalogName, Properties properties) {
+        if (dataSources.get(catalogName) == null) {
+            createDataSource(catalogName, properties);
+        }
+        return this;
+    }
+
+    public DataSource get(String catalogName) {
+        return dataSources.get(catalogName);
+    }
+
+    public Driver getDriver(String catalogName, Driver driver) {
+        DataSource dataSource = get(catalogName);
+        return dataSource != null ? new JdbcDriver(driver, dataSource) : driver;
+    }
+
+    private synchronized void createDataSource(String catalogName, Map props) {
+        if (dataSources.get(catalogName) == null) {
+            Properties dataSourceProperties = new Properties();
+            props.forEach((key, value) -> {
+                String prop = String.valueOf(key);
+                if (prop.startsWith(JDO_PREFIX)) {
+                    dataSourceProperties.put(prop.substring(JDO_PREFIX.length()), value);
+                }
+            });
+            if (!dataSourceProperties.isEmpty()) {
+                try {
+                    DataSource dataSource = new DataSourceFactory().createDataSource(dataSourceProperties);
+                    dataSources.put(catalogName, dataSource);
+                } catch (Exception e) {
+                    throw new RuntimeException(String.format("Failed to load the data source for catalog %s with error [%s]", catalogName, e.getMessage()), e);
+                }
+            }
+        }
+    }
+
+    @PreDestroy
+    public void close() {
+        Iterator<DataSource> iter = dataSources.values().iterator();
+        while (iter.hasNext()) {
+            DataSourceProxy dataSource = (DataSourceProxy) iter.next();
+            if (dataSource != null) {
+                dataSource.close();
+            }
+            iter.remove();
+        }
+    }
+}
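Illustrative sketch, not part of the patch: DataSourceManager.load strips the javax.jdo.option. prefix and hands the remainder to Tomcat's jdbc-pool DataSourceFactory, so the suffixes must be pool property names such as url, username, password and driverClassName. The catalog name and connection values below are hypothetical.

    import java.util.Properties;
    import javax.sql.DataSource;

    Properties props = new Properties();
    props.setProperty("javax.jdo.option.url", "jdbc:mysql://example-host:3306/metacat"); // hypothetical
    props.setProperty("javax.jdo.option.username", "metacat");                           // hypothetical
    props.setProperty("javax.jdo.option.password", "example-secret");                    // hypothetical
    props.setProperty("javax.jdo.option.driverClassName", "com.mysql.jdbc.Driver");

    DataSource dataSource = DataSourceManager.get()
            .load("examplecatalog", props)   // no-op if this catalog was already loaded
            .get("examplecatalog");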
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/util/JdbcDriver.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/util/JdbcDriver.java
new file mode 100644
index 000000000..cbb90542d
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/util/JdbcDriver.java
@@ -0,0 +1,60 @@
+package com.netflix.metacat.common.util;
+
+import com.google.common.base.Throwables;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.DriverPropertyInfo;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.Properties;
+import java.util.logging.Logger;
+
+/**
+ * Created by amajumdar on 3/15/16.
+ */
+public class JdbcDriver implements Driver {
+    private DataSource datasource;
+    private Driver driver;
+
+    public JdbcDriver(Driver driver, DataSource datasource) {
+        this.driver = driver;
+        this.datasource = datasource;
+    }
+
+    @Override
+    public Connection connect(String url, Properties info) throws SQLException {
+        return datasource.getConnection();
+    }
+
+    @Override
+    public boolean acceptsURL(String url) throws SQLException {
+        return driver.acceptsURL(url);
+    }
+
+    @Override
+    public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
+        return driver.getPropertyInfo(url, info);
+    }
+
+    @Override
+    public int getMajorVersion() {
+        return driver.getMajorVersion();
+    }
+
+    @Override
+    public int getMinorVersion() {
+        return driver.getMinorVersion();
+    }
+
+    @Override
+    public boolean jdbcCompliant() {
+        return driver.jdbcCompliant();
+    }
+
+    @Override
+    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
+        return driver.getParentLogger();
+    }
+}
diff --git a/metacat-common-server/src/main/java/com/netflix/metacat/common/util/MetacatContextManager.java b/metacat-common-server/src/main/java/com/netflix/metacat/common/util/MetacatContextManager.java
new file mode 100644
index 000000000..603d68503
--- /dev/null
+++ b/metacat-common-server/src/main/java/com/netflix/metacat/common/util/MetacatContextManager.java
@@ -0,0 +1,27 @@
+package com.netflix.metacat.common.util;
+
+import com.netflix.metacat.common.MetacatContext;
+
+/**
+ * Created by amajumdar on 8/3/15.
+ */
+public class MetacatContextManager {
+    private static InheritableThreadLocal<MetacatContext> context = new InheritableThreadLocal<>();
+
+    public static void removeContext() {
+        context.remove();
+    }
+
+    public static void setContext(MetacatContext context) {
+        MetacatContextManager.context.set(context);
+    }
+
+    public static MetacatContext getContext() {
+        MetacatContext result = context.get();
+        if (result == null) {
+            result = new MetacatContext(null, null, null, null, null);
+            setContext(result);
+        }
+        return result;
+    }
+}
diff --git a/metacat-common-server/src/test/groovy/com/netflix/metacat/common/partition/util/FilterPartitionSpec.groovy b/metacat-common-server/src/test/groovy/com/netflix/metacat/common/partition/util/FilterPartitionSpec.groovy
new file mode 100644
index 000000000..d33f16860
--- /dev/null
+++ b/metacat-common-server/src/test/groovy/com/netflix/metacat/common/partition/util/FilterPartitionSpec.groovy
@@ -0,0 +1,56 @@
+package com.netflix.metacat.common.partition.util
+
+import spock.lang.Shared
+import spock.lang.Specification
+import spock.lang.Unroll
+
+/**
+ * Created by amajumdar on 3/1/16.
+ */
+class FilterPartitionSpec extends Specification {
+    @Shared
+    def filterPartition = new FilterPartition()
+
+    @Unroll
+    def 'evaluate expression #expression for name #name to #result'() {
+        expect:
+        filterPartition.evaluatePartitionExpression(expression, name, null) == result
+        where:
+        name | expression | result
+        "dateint=1" | "dateint>1" | false
+        "dateint=1" | "dateint>=1" | true
+        "dateint=1" | "dateint<1" | false
+        "dateint=1" | "dateint<=1" | true
+        "dateint=1" | "dateint==1" | true
+        "dateint=1" | "dateint!=1" | false
+        "dateint=1" | "(dateint>1) or (dateint<1)" | false
+        "dateint=1" | "(dateint>1) or (dateint<=1)" | true
+        "dateint=1" | "(dateint>1) and (dateint<=1)" | false
+        "dateint=1" | "(dateint==1) and ((dateint<=1) or (dateint>=1))" | true
+        "dateint=1" | "(((dateint>1) or (dateint<=1)) and (dateint==1))" | true
+        "dateint=1" | "('12' < 2)" | false
+        "dateint=1" | "(12 < 2)" | false
+        "dateint=1" | "('12' < '2')" | true
+        "dateint=1" | "(12 < '2')" | true
+        "dateint=1" | "(batchid>=1)" | false
+
+        "dateint=12" | "(dateint < 2)" | false
+        "dateint=12" | "(dateint <= 2)" | false
+        "dateint=12" | "(dateint < '2')" | true
+        "dateint=12" | "(dateint <= '2')" | true
+        "dateint=12" | "(dateint2 != 2)" | false
+        "dateint=12" | "(dateint2 == 12)" | false
+
+        "apath" | "(batchid>=1)" | false
+
+        "dateint=1/batchid=2" | "(batchid>=1)" | true
+        "dateint=1/batchid=2" | "((dateint==1) and (batchid>=1))" | true
+
+        "dateint=1/type=java" | "((dateint==1) and (type=='java'))" | true
+        "dateint=1/type=java" | "((dateint==1) and (type=='bava'))" | false
+
+        "dateint=1/type=java" | "(dateint>1 and type=='java') or (dateint==1 and type=='java')" | true
+        "dateint=1/type=java" | "(dateint>1 or dateint<1) and (type=='bava' or type=='java')" | false
+        "dateint=1/type=java" | "(dateint>1 or dateint<1) or (type=='bava' or type=='java')" | true
+    }
+}
diff --git a/metacat-common-server/src/test/groovy/com/netflix/metacat/common/partition/visitor/PartitionParserEvalSpec.groovy b/metacat-common-server/src/test/groovy/com/netflix/metacat/common/partition/visitor/PartitionParserEvalSpec.groovy
new file mode 100644
index 000000000..2b90152cf
--- /dev/null
+++ b/metacat-common-server/src/test/groovy/com/netflix/metacat/common/partition/visitor/PartitionParserEvalSpec.groovy
@@ -0,0 +1,29 @@
+package com.netflix.metacat.common.partition.visitor
+
+import spock.lang.Shared
+import
spock.lang.Specification + +/** + * Created by amajumdar on 3/2/16. + */ +class PartitionParserEvalSpec extends Specification{ + @Shared def eval = new PartitionParserEval() + + def 'sql #sql to regex #regex'(){ + expect: + eval.sqlLiketoRegexExpression(sql) == regex + where: + sql | regex + "5[%]" | "5%" + "[_]n" | "_n" + "[a-cdf]" | "[a-cdf]" + "[-acdf]" | "[-acdf]" + "[[]" | "[" + "]" | "]" + "abc[_]d%" | "abc_d.*" + "abc[def]" | "abc[def]" + "[def%]" | "[def.*]" + "[def_]" | "[def.]" + "" | "" + } +} diff --git a/metacat-common/build.gradle b/metacat-common/build.gradle new file mode 100644 index 000000000..6f7204f72 --- /dev/null +++ b/metacat-common/build.gradle @@ -0,0 +1,18 @@ +apply plugin: 'java' + +compileJava { + sourceCompatibility = '1.7' + targetCompatibility = '1.7' +} + +dependencies { + compile "com.fasterxml.jackson.core:jackson-annotations:${jackson_version}" + compile "com.fasterxml.jackson.core:jackson-core:${jackson_version}" + compile "com.fasterxml.jackson.core:jackson-databind:${jackson_version}" + compile 'com.google.code.findbugs:jsr305:3.0.0' + compile "com.wordnik:swagger-annotations:${swagger_version}" + compile 'javax.ws.rs:jsr311-api:1.1.1' + testCompile "com.google.guava:guava:${guava_version}" + testCompile 'nl.jqno.equalsverifier:equalsverifier:1.7.2' + testCompile 'org.apache.commons:commons-lang3:3.4' +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/MetacatContext.java b/metacat-common/src/main/java/com/netflix/metacat/common/MetacatContext.java new file mode 100644 index 000000000..1bd6de57b --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/MetacatContext.java @@ -0,0 +1,51 @@ +package com.netflix.metacat.common; + +/** + * Created by amajumdar on 8/3/15. + */ +public class MetacatContext { + public static final String HEADER_KEY_USER_NAME = "X-Netflix.user.name"; + public static final String HEADER_KEY_CLIENT_APP_NAME = "X-Netflix.client.app.name"; + public static final String HEADER_KEY_JOB_ID = "X-Netflix.job.id"; + public static final String HEADER_KEY_DATA_TYPE_CONTEXT = "X-Netflix.data.type.context"; + private final String userName; + private final String clientAppName; + private final String clientId; + private final String jobId; + private final String dataTypeContext; + public enum DATA_TYPE_CONTEXTS {hive, pig, presto} + public MetacatContext(String userName, String clientAppName, String clientId, String jobId, String dataTypeContext) { + this.userName = userName; + this.clientAppName = clientAppName; + this.clientId = clientId; + this.jobId = jobId; + this.dataTypeContext = dataTypeContext; + } + + public String getUserName() { + return userName; + } + + public String getClientAppName() { + return clientAppName; + } + + public String getJobId() { + return jobId; + } + + public String getClientId() { + return clientId; + } + + public String getDataTypeContext() { + return dataTypeContext; + } + + @Override + public String toString() { + return "MetacatContext{" + "userName='" + userName + '\'' + ", clientAppName='" + clientAppName + '\'' + + ", clientId='" + clientId + '\'' + ", jobId='" + jobId + '\'' + ", dataTypeContext='" + + dataTypeContext + '\'' + '}'; + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/NameDateDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/NameDateDto.java new file mode 100644 index 000000000..613deabbc --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/NameDateDto.java @@ -0,0 +1,56 @@ +package 
com.netflix.metacat.common; + +import com.netflix.metacat.common.dto.BaseDto; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.util.Date; +import java.util.Objects; + +public class NameDateDto extends BaseDto { + private static final long serialVersionUID = -5713826608609231492L; + @ApiModelProperty(value = "The date the entity was created", required = false) + private Date createDate; + @ApiModelProperty(value = "The date the entity was last updated", required = false) + private Date lastUpdated; + @ApiModelProperty(value = "The entity's name", required = true) + private QualifiedName name; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof NameDateDto)) return false; + NameDateDto that = (NameDateDto) o; + return Objects.equals(name, that.name) && + Objects.equals(createDate, that.createDate) && + Objects.equals(lastUpdated, that.lastUpdated); + } + + public Date getCreateDate() { + return createDate; + } + + public void setCreateDate(Date createDate) { + this.createDate = createDate; + } + + public Date getLastUpdated() { + return lastUpdated; + } + + public void setLastUpdated(Date lastUpdated) { + this.lastUpdated = lastUpdated; + } + + public QualifiedName getName() { + return name; + } + + public void setName(QualifiedName name) { + this.name = name; + } + + @Override + public int hashCode() { + return Objects.hash(name, createDate, lastUpdated); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/QualifiedName.java b/metacat-common/src/main/java/com/netflix/metacat/common/QualifiedName.java new file mode 100644 index 000000000..3b8679ace --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/QualifiedName.java @@ -0,0 +1,321 @@ +package com.netflix.metacat.common; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; +import com.fasterxml.jackson.databind.JsonNode; +import com.netflix.metacat.common.dto.PartitionDto; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A fully qualified name that references a source of data + */ +public class QualifiedName implements Serializable { + private final String catalogName; + private final String databaseName; + private final String partitionName; + private final String tableName; + private final String viewName; + + private String qualifiedName; + private Map qualifiedNameMap; + + private QualifiedName( + @Nonnull String catalogName, + @Nullable String databaseName, + @Nullable String tableName, + @Nullable String partitionName, + @Nullable String viewName + ) { + this.catalogName = standardizeRequired("catalogName", catalogName); + this.databaseName = standardizeOptional(databaseName, true); + this.tableName = standardizeOptional(tableName, true); + this.partitionName = standardizeOptional(partitionName, false); + this.viewName = standardizeOptional(viewName, true); + + if (this.databaseName.isEmpty() && (!this.tableName.isEmpty() || !this.partitionName.isEmpty())) { + throw new IllegalStateException("databaseName is not present but tableName or partitionName are present"); + } else if (this.tableName.isEmpty() && !this.partitionName.isEmpty()) { + throw new IllegalStateException("tableName is not present but partitionName is present"); + } + } + + + @JsonCreator + public static QualifiedName fromJson(JsonNode node) { + JsonNode 
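+        // The creator accepts either the expanded object form, e.g. {"catalogName": "prod", "databaseName": "mydb"},
+        // or the packed form {"qualifiedName": "prod/mydb/mytable"} (example payloads are illustrative)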
catalogNameNode = node.path("catalogName");
+        if (catalogNameNode.isMissingNode() || catalogNameNode.isNull() || !catalogNameNode.isTextual()) {
+            // If catalogName is not present try to load from the qualifiedName node instead
+            JsonNode nameNode = node.path("qualifiedName");
+            if (!nameNode.isNull() && nameNode.isTextual()) {
+                return fromString(nameNode.asText(), false);
+            } else {
+                // if neither are available throw an exception
+                throw new IllegalStateException("Node '" + node + "' is missing catalogName");
+            }
+        }
+        String catalogName = catalogNameNode.asText();
+        // node.path() never returns null; test for missing/null nodes instead, so that an explicit
+        // JSON null is not converted to the string "null" by asText()
+        JsonNode databaseNameNode = node.path("databaseName");
+        String databaseName = null;
+        if (!databaseNameNode.isMissingNode() && !databaseNameNode.isNull()) {
+            databaseName = databaseNameNode.asText();
+        }
+        JsonNode tableNameNode = node.path("tableName");
+        String tableName = null;
+        if (!tableNameNode.isMissingNode() && !tableNameNode.isNull()) {
+            tableName = tableNameNode.asText();
+        }
+        JsonNode partitionNameNode = node.path("partitionName");
+        String partitionName = null;
+        if (!partitionNameNode.isMissingNode() && !partitionNameNode.isNull()) {
+            partitionName = partitionNameNode.asText();
+        }
+        JsonNode viewNameNode = node.path("viewName");
+        String viewName = null;
+        if (!viewNameNode.isMissingNode() && !viewNameNode.isNull()) {
+            viewName = viewNameNode.asText();
+        }
+        return new QualifiedName(catalogName, databaseName, tableName, partitionName, viewName);
+    }
+
+    public static QualifiedName fromString(@Nonnull String s) {
+        return fromString(s, false);
+    }
+
+    public static QualifiedName fromString(@Nonnull String s, boolean isView) {
+        //noinspection ConstantConditions
+        String name = s == null ? "" : s.trim();
+        if (name.isEmpty()) {
+            throw new IllegalArgumentException("passed in an empty definition name");
+        }
+
+        String[] parts = name.split("/", 4);
+        switch (parts.length) {
+        case 1:
+            return ofCatalog(parts[0]);
+        case 2:
+            return ofDatabase(parts[0], parts[1]);
+        case 3:
+            return ofTable(parts[0], parts[1], parts[2]);
+        case 4:
+            if (isView) {
+                return ofView(parts[0], parts[1], parts[2], parts[3]);
+            } else {
+                return ofPartition(parts[0], parts[1], parts[2], parts[3]);
+            }
+        default:
+            throw new IllegalArgumentException("Unable to convert '" + s + "' into a qualified name");
+        }
+    }
+
+    public static QualifiedName ofCatalog(@Nonnull String catalogName) {
+        return new QualifiedName(catalogName, null, null, null, null);
+    }
+
+    public static QualifiedName ofDatabase(@Nonnull String catalogName, @Nonnull String databaseName) {
+        return new QualifiedName(catalogName, databaseName, null, null, null);
+    }
+
+    public static QualifiedName ofView(@Nonnull String catalogName, @Nonnull String databaseName,
+            @Nonnull String tableName, @Nonnull String viewName) {
+        return new QualifiedName(catalogName, databaseName, tableName, null, viewName);
+    }
+
+    public static QualifiedName ofPartition(@Nonnull QualifiedName tableName, @Nonnull PartitionDto partitionDto) {
+        return ofPartition(
+                // pass the catalog of the table's qualified name, not its table name
+                tableName.catalogName,
+                tableName.databaseName,
+                tableName.tableName,
+                partitionDto.getName().getPartitionName()
+        );
+    }
+
+    public static QualifiedName ofPartition(@Nonnull String catalogName, @Nonnull String databaseName,
+            @Nonnull String tableName, @Nonnull String partitionName) {
+        return new QualifiedName(catalogName, databaseName, tableName, partitionName, null);
+    }
+
+    public static QualifiedName ofTable(@Nonnull String catalogName, @Nonnull String databaseName,
+            @Nonnull String tableName) {
+        return new QualifiedName(catalogName, databaseName, tableName, null, null);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o
instanceof QualifiedName)) return false; + QualifiedName that = (QualifiedName) o; + return Objects.equals(catalogName, that.catalogName) && + Objects.equals(databaseName, that.databaseName) && + Objects.equals(partitionName, that.partitionName) && + Objects.equals(tableName, that.tableName) && + Objects.equals(viewName, that.viewName); + } + + public String getCatalogName() { + return catalogName; + } + + public String getDatabaseName() { + if (databaseName.isEmpty()) { + throw new IllegalStateException("This is not a database definition"); + } + return databaseName; + } + + public String getPartitionName() { + if (partitionName.isEmpty()) { + throw new IllegalStateException("This is not a partition definition"); + } + return partitionName; + } + + public String getTableName() { + if (tableName.isEmpty()) { + throw new IllegalStateException("This is not a table definition"); + } + return tableName; + } + + @Override + public int hashCode() { + return Objects.hash(catalogName, databaseName, partitionName, tableName, viewName); + } + + public boolean isCatalogDefinition() { + return !catalogName.isEmpty(); + } + + public boolean isDatabaseDefinition() { + return !databaseName.isEmpty(); + } + + public boolean isPartitionDefinition() { + return !partitionName.isEmpty(); + } + + public boolean isTableDefinition() { + return !tableName.isEmpty(); + } + + private String standardizeOptional(String value, boolean forceLowerCase) { + if (value == null) { + return ""; + } else { + value = value.trim(); + if (forceLowerCase) { + value = value.toLowerCase(); + } + return value; + } + } + + private String standardizeRequired(String name, String value) { + if (value == null) { + throw new IllegalStateException(name + " cannot be null"); + } + + value = value.trim(); + if (value.isEmpty()) { + throw new IllegalStateException(name + " cannot be an empty string"); + } + + return value.toLowerCase(); + } + + @JsonValue + public Map toJson() { + if( qualifiedNameMap == null) { + Map map = new HashMap<>(4); + map.put("qualifiedName", toString()); + map.put("catalogName", catalogName); + + if (!databaseName.isEmpty()) { + map.put("databaseName", databaseName); + } + + if (!tableName.isEmpty()) { + map.put("tableName", tableName); + } + + if (!partitionName.isEmpty()) { + map.put("partitionName", partitionName); + } + + if (!viewName.isEmpty()) { + map.put("viewName", viewName); + } + + qualifiedNameMap = map; + } + + return qualifiedNameMap; + } + + public boolean isViewDefinition() { + return !viewName.isEmpty(); + } + + @Override + public String toString() { + if( qualifiedName == null) { + StringBuilder sb = new StringBuilder(catalogName); + + if (!databaseName.isEmpty()) { + sb.append('/'); + sb.append(databaseName); + } + + if (!tableName.isEmpty()) { + sb.append('/'); + sb.append(tableName); + } + + if (!partitionName.isEmpty()) { + sb.append('/'); + sb.append(partitionName); + } + + if (!viewName.isEmpty()) { + sb.append('/'); + sb.append(viewName); + } + qualifiedName = sb.toString(); + } + + return qualifiedName; + } + + public static String toWildCardString(String sourceName, String databaseName, String tableName){ + if( sourceName == null && databaseName ==null && tableName == null){ + return null; + } + StringBuilder builder = new StringBuilder(); + if( sourceName != null){ + builder.append(sourceName); + } else { + builder.append('%'); + } + if(databaseName != null){ + builder.append('/').append(databaseName); + } else { + builder.append("/%"); + } + if(tableName != null){ + 
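+        // Worked example (illustrative values): toWildCardString("prod", null, "part") appends "prod",
+        // then "/%" for the missing database, reaches this branch with tableName = "part",
+        // and returns "prod/%/part%" once the trailing '%' is appended below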
builder.append('/').append(tableName); + } else { + builder.append("/%"); + } + builder.append('%'); + return builder.toString(); + } + + public String getViewName() { + return viewName; + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/api/MetacatV1.java b/metacat-common/src/main/java/com/netflix/metacat/common/api/MetacatV1.java new file mode 100644 index 000000000..6e251ebb8 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/api/MetacatV1.java @@ -0,0 +1,497 @@ +package com.netflix.metacat.common.api; + +import com.netflix.metacat.common.NameDateDto; +import com.netflix.metacat.common.dto.CatalogDto; +import com.netflix.metacat.common.dto.CatalogMappingDto; +import com.netflix.metacat.common.dto.CreateCatalogDto; +import com.netflix.metacat.common.dto.DatabaseCreateRequestDto; +import com.netflix.metacat.common.dto.DatabaseDto; +import com.netflix.metacat.common.dto.TableDto; +import com.wordnik.swagger.annotations.Api; +import com.wordnik.swagger.annotations.ApiOperation; +import com.wordnik.swagger.annotations.ApiParam; +import com.wordnik.swagger.annotations.ApiResponse; +import com.wordnik.swagger.annotations.ApiResponses; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import java.net.HttpURLConnection; +import java.util.List; + +@Path("mds/v1") +@Api(value = "MetacatV1", + description = "Federated metadata operations", + produces = MediaType.APPLICATION_JSON, + consumes = MediaType.APPLICATION_JSON) +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public interface MetacatV1 { + @POST + @Path("catalog") + @ApiOperation( + position = 3, + value = "Creates a new catalog", + notes = "Returns success if there were no errors creating the catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, message = "No catalogs are registered with the server") + }) + void createCatalog(CreateCatalogDto createCatalogDto); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 2, + value = "Creates the given database in the given catalog", + notes = "Given a catalog and a database name, creates the database in the catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database cannot be located" + ) + }) + void createDatabase( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The database information", required = false) + DatabaseCreateRequestDto databaseCreateRequestDto + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 2, + value = "Creates a table", + notes = "Creates the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto 
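+    // Hypothetical request for the operation declared below (names are illustrative):
+    //   POST /mds/v1/catalog/prod/database/mydb/table/mytable   with a TableDto body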
createTable( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The table information", required = true) + TableDto table + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 2, + value = "Creates a metacat view. A staging table that can contain partitions referring to the table partition locations.", + notes = "Creates the given metacat view. A staging table that can contain partitions referring to the table partition locations.") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto createMView( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "To snapshot a list of partitions of the table to this view. If true, it will restore the partitions from the table to this view.", required = false) + @DefaultValue("false") @QueryParam("snapshot") + Boolean snapshot, + @ApiParam(value = "Filter expression string to use", required = false) + @QueryParam("filter") + String filter + ); + + @DELETE + @Path("catalog/{catalog-name}/database/{database-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 4, + value = "Deletes the given database from the given catalog", + notes = "Given a catalog and database, deletes the database from the catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database cannot be located" + ) + }) + void deleteDatabase( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName + ); + + @DELETE + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 4, + value = "Delete table", + notes = "Deletes the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto deleteTable( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName + ); + + @DELETE + 
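+    // Hypothetical request for the mview delete declared below (names are illustrative):
+    //   DELETE /mds/v1/catalog/prod/database/mydb/table/mytable/mview/myview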
@Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 4, + value = "Delete metacat view", + notes = "Deletes the given metacat view") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + TableDto deleteMView( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName + ); + + @GET + @Path("catalog/{catalog-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 2, + value = "Databases for the requested catalog", + notes = "The list of databases that belong to the given catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, message = "The requested catalog cannot be located") + }) + CatalogDto getCatalog( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName + ); + + @GET + @Path("catalog") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "List registered catalogs", + notes = "The names and types of all catalogs registered with this server") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, message = "No catalogs are registered with the server") + }) + List getCatalogNames(); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "Tables for the requested database", + notes = "The list of tables that belong to the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database cannot be located" + ) + }) + DatabaseDto getDatabase( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("true") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "Table information", + notes = "Table information for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto getTable( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + 
@PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Whether to include the core information about the table (location, serde, columns) in " + + "the response. You would only say false here if you only want metadata.", required = false) + @DefaultValue("true") @QueryParam("includeInfo") + Boolean includeInfo, + @ApiParam(value = "Whether to include user definition metadata information to the response", required = false) + @DefaultValue("true") @QueryParam("includeDefinitionMetadata") + Boolean includeDefinitionMetadata, + @ApiParam(value = "Whether to include user data metadata information to the response", required = false) + @DefaultValue("true") @QueryParam("includeDataMetadata") + Boolean includeDataMetadata + ); + + @GET + @Path("catalog/{catalog-name}/mviews") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "List of metacat views", + notes = "List of metacat views for a catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog cannot be located" + ) + }) + List getMViews( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mviews") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "List of metacat views", + notes = "List of metacat views for a catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog cannot be located" + ) + }) + List getMViews( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "Metacat View information", + notes = "View information for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto getMView( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the view", required = true) + @PathParam("view-name") + String viewName + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/rename") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 3, + value = "Rename table", + notes = "Renames the given table with the new name") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = 
"The requested catalog or database or table cannot be located" + ) + }) + void renameTable( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the table", required = true) + @QueryParam("newTableName") + String newTableName + ); + + @PUT + @Path("catalog/{catalog-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 4, + value = "Updates an existing catalog", + notes = "Returns success if there were no errors updating the catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, message = "No catalogs are registered with the server") + }) + void updateCatalog( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + CreateCatalogDto createCatalogDto + ); + + @PUT + @Path("catalog/{catalog-name}/database/{database-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 3, + value = "Updates the given database in the given catalog", + notes = "Given a catalog and a database name, creates the database in the catalog") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database cannot be located" + ) + }) + void updateDatabase( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The database information", required = false) + DatabaseCreateRequestDto databaseUpdateRequestDto + ); + + @PUT + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 3, + value = "Update mview", + notes = "Updates the given mview") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto updateMView( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "The view information", required = true) + TableDto table + ); + + @PUT + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 3, + value = "Update table", + notes = "Updates the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + TableDto updateTable( + @ApiParam(value = "The name of the catalog", required = true) + 
@PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The table information", required = true) + TableDto table + ); +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/api/MetadataV1.java b/metacat-common/src/main/java/com/netflix/metacat/common/api/MetadataV1.java new file mode 100644 index 000000000..d2d591259 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/api/MetadataV1.java @@ -0,0 +1,83 @@ +package com.netflix.metacat.common.api; + +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.DataMetadataDto; +import com.netflix.metacat.common.dto.DataMetadataGetRequestDto; +import com.netflix.metacat.common.dto.DefinitionMetadataDto; +import com.netflix.metacat.common.dto.SortOrder; +import com.wordnik.swagger.annotations.Api; +import com.wordnik.swagger.annotations.ApiOperation; +import com.wordnik.swagger.annotations.ApiParam; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import java.util.List; +import java.util.Set; + +@Path("mds/v1/metadata") +@Api(value = "MetadataV1", + description = "Federated user metadata operations", + produces = MediaType.APPLICATION_JSON, + consumes = MediaType.APPLICATION_JSON) +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public interface MetadataV1 { + @POST + @Path("data") + @ApiOperation( + position = 1, + value = "Returns the data metadata", + notes = "Returns the data metadata") + DataMetadataDto getDataMetadata(DataMetadataGetRequestDto metadataGetRequestDto); + + @GET + @Path("definition/list") + @ApiOperation( + position = 2, + value = "Returns the definition metadata", + notes = "Returns the definition metadata") + List getDefinitionMetadataList( + @ApiParam(value = "Sort the list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "has lifetime set", required = false) + @DefaultValue("false") @QueryParam("lifetime") + Boolean lifetime, + @ApiParam(value = "Type of the metadata item. Values: database, table, partition", required = false) + @QueryParam("type") + String type, + @ApiParam(value = "Text that matches the name of the metadata (accepts sql wildcards)", required = false) + @QueryParam("name") + String name, + @ApiParam(value = "Set of data property names. 
Filters the returned list that only contains the given property names", required = false) + @QueryParam("data-property") + Set dataProperties + ); + + @GET + @Path("searchByOwners") + @ApiOperation( + position = 3, + value = "Returns the qualified names owned by the given owners", + notes = "Returns the qualified names owned by the given owners") + List searchByOwners( + @ApiParam(value = "Set of owners", required = true) + @QueryParam("owner") + Set owners + ); +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/api/PartitionV1.java b/metacat-common/src/main/java/com/netflix/metacat/common/api/PartitionV1.java new file mode 100644 index 000000000..bc67b2a2c --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/api/PartitionV1.java @@ -0,0 +1,726 @@ +package com.netflix.metacat.common.api; + +import com.netflix.metacat.common.dto.GetPartitionsRequestDto; +import com.netflix.metacat.common.dto.PartitionDto; +import com.netflix.metacat.common.dto.PartitionsSaveRequestDto; +import com.netflix.metacat.common.dto.PartitionsSaveResponseDto; +import com.netflix.metacat.common.dto.SortOrder; +import com.wordnik.swagger.annotations.Api; +import com.wordnik.swagger.annotations.ApiOperation; +import com.wordnik.swagger.annotations.ApiParam; +import com.wordnik.swagger.annotations.ApiResponse; +import com.wordnik.swagger.annotations.ApiResponses; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import java.net.HttpURLConnection; +import java.util.List; + +/** + * Created by amajumdar on 6/17/15. 
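+ * <p>Filter parameters on these operations accept the partition expression syntax exercised in
+ * FilterPartitionSpec, e.g. (values are illustrative):</p>
+ * <pre>
+ * (dateint>=20160101) and (type=='java')
+ * </pre>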
+ */ +@Path("mds/v1/partition") +@Api(value = "PartitionV1", + description = "Federated partition metadata operations", + produces = MediaType.APPLICATION_JSON, + consumes = MediaType.APPLICATION_JSON) +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public interface PartitionV1 { + @DELETE + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "Delete named partitions from a table", + notes = "List of partitions names of the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ), + @ApiResponse(code = HttpURLConnection.HTTP_BAD_REQUEST, + message = "The list of partitionNames is not present" + ) + }) + void deletePartitions( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "partitionId of the partitions to be deleted from this table", required = true) + List partitionIds + ); + + + @DELETE + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "Delete partitions for the given view", + notes = "Delete partitions for the given view") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ), + @ApiResponse(code = HttpURLConnection.HTTP_BAD_REQUEST, + message = "The list of partitionNames is not present" + ) + }) + void deletePartitions( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "partitionId of the partitions to be deleted from this table", required = true) + List partitionIds + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partitions for a table", + notes = "List of partitions for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + List getPartitions( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Filter expression string to use", required = false) + 
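+            // e.g. filter=(dateint>=20160101) and (batchid>=1)   (illustrative values)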
@QueryParam("filter") + String filter, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partitions for a metacat view", + notes = "List of partitions for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + List getPartitions( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Filter expression string to use", required = false) + @QueryParam("filter") + String filter, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/request") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partitions for a table", + notes = "List of partitions for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + List getPartitionsForRequest( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + 
@ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata, + @ApiParam(value = "Request containing the filter expression for the partitions", required = false) + GetPartitionsRequestDto getPartitionsRequestDto + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/request") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partitions for a metacat view", + notes = "List of partitions for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + List getPartitionsForRequest( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata, + @ApiParam(value = "Request containing the filter expression for the partitions", required = false) + GetPartitionsRequestDto getPartitionsRequestDto + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition keys for a table", + notes = "List of partition keys for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + List getPartitionKeys( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Filter expression string to use", required = false) + @QueryParam("filter") + String filter, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + 
@QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition keys for a metacat view", + notes = "List of partition keys for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + List getPartitionKeys( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Filter expression string to use", required = false) + @QueryParam("filter") + String filter, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/keys-request") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition keys for a table", + notes = "List of partition keys for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + List getPartitionKeysForRequest( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + 
@QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata, + @ApiParam(value = "Request containing the filter expression for the partitions", required = false) + GetPartitionsRequestDto getPartitionsRequestDto + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/keys-request") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition keys for a metacat view", + notes = "List of partition keys for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + List getPartitionKeysForRequest( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata, + @ApiParam(value = "Request containing the filter expression for the partitions", required = false) + GetPartitionsRequestDto getPartitionsRequestDto + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition uris for a table", + notes = "List of partition uris for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + List getPartitionUris( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Filter expression string to use", required = false) + @QueryParam("filter") + String filter, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + 
@ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition uris for a metacat view", + notes = "List of partition uris for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + List getPartitionUris( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Filter expression string to use", required = false) + @QueryParam("filter") + String filter, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/uris-request") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition uris for a table", + notes = "List of partition uris for the given table name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + List getPartitionUrisForRequest( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") 
@QueryParam("includeUserMetadata") + Boolean includeUserMetadata, + @ApiParam(value = "Request containing the filter expression for the partitions", required = false) + GetPartitionsRequestDto getPartitionsRequestDto + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/uris-request") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + value = "List of partition uris for a metacat view", + notes = "List of partition uris for the given view name under the given catalog and database") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + List getPartitionUrisForRequest( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the metacat view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Sort the partition list by this value", required = false) + @QueryParam("sortBy") + String sortBy, + @ApiParam(value = "Sorting order to use", required = false) + @QueryParam("sortOrder") + SortOrder sortOrder, + @ApiParam(value = "Offset of the list returned", required = false) + @QueryParam("offset") + Integer offset, + @ApiParam(value = "Size of the partition list", required = false) + @QueryParam("limit") + Integer limit, + @ApiParam(value = "Whether to include user metadata information to the response", required = false) + @DefaultValue("false") @QueryParam("includeUserMetadata") + Boolean includeUserMetadata, + @ApiParam(value = "Request containing the filter expression for the partitions", required = false) + GetPartitionsRequestDto getPartitionsRequestDto + ); + + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 5, + value = "Add/update partitions to the given table", + notes = "Add/update partitions to the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + PartitionsSaveResponseDto savePartitions( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Request containing the list of partitions", required = true) + PartitionsSaveRequestDto partitionsSaveRequestDto + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 5, + value = "Add/update partitions to the given table", + notes = "Add/update partitions to the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + 
PartitionsSaveResponseDto savePartitions( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the view", required = true) + @PathParam("view-name") + String viewName, + @ApiParam(value = "Request containing the list of partitions", required = true) + PartitionsSaveRequestDto partitionsSaveRequestDto + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/count") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 5, + value = "Partition count for the given table", + notes = "Partition count for the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + Integer getPartitionCount( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName + ); + + @GET + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}/mview/{view-name}/count") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 5, + value = "Partition count for the given metacat view", + notes = "Partition count for the given metacat view") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or metacat view cannot be located" + ) + }) + Integer getPartitionCount( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "The name of the view", required = true) + @PathParam("view-name") + String viewName + ); +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/api/SearchMetacatV1.java b/metacat-common/src/main/java/com/netflix/metacat/common/api/SearchMetacatV1.java new file mode 100644 index 000000000..f5f4f3e8d --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/api/SearchMetacatV1.java @@ -0,0 +1,24 @@ +package com.netflix.metacat.common.api; + +import com.netflix.metacat.common.dto.TableDto; + +import javax.ws.rs.Consumes; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import java.util.List; + +@Path("mds/v1/search") +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public interface SearchMetacatV1 { + @GET + @Path("table") + @Consumes(MediaType.APPLICATION_JSON) + List<TableDto> searchTables( + @QueryParam("q") + String searchString + ); +}
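SearchMetacatV1 above is a plain JAX-RS interface, so the endpoint can be exercised with any HTTP client. A minimal client sketch, assuming a Metacat server at the invented address http://localhost:8080 and Jackson on the classpath for parsing the response; only the mds/v1/search/table path and the q parameter come from the annotations above:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;

public class SearchClientSketch {
    public static void main(String[] args) throws Exception {
        // Host and port are assumptions; the path comes from the @Path annotations above.
        URL url = new URL("http://localhost:8080/mds/v1/search/table?q="
                + URLEncoder.encode("example_table", "UTF-8"));
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        // The endpoint returns a JSON array of TableDto objects.
        JsonNode tables = new ObjectMapper().readTree(conn.getInputStream());
        for (JsonNode table : tables) {
            System.out.println(table.path("name"));
        }
        conn.disconnect();
    }
}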
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/api/TagV1.java b/metacat-common/src/main/java/com/netflix/metacat/common/api/TagV1.java new file mode 100644 index 000000000..ae6293fb1 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/api/TagV1.java @@ -0,0 +1,147 @@ +package com.netflix.metacat.common.api; + +import com.netflix.metacat.common.QualifiedName; +import com.wordnik.swagger.annotations.Api; +import com.wordnik.swagger.annotations.ApiOperation; +import com.wordnik.swagger.annotations.ApiParam; +import com.wordnik.swagger.annotations.ApiResponse; +import com.wordnik.swagger.annotations.ApiResponses; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import java.net.HttpURLConnection; +import java.util.List; +import java.util.Set; + +@Path("mds/v1/tag") +@Api(value = "TagV1", + description = "Federated metadata tag operations", + produces = MediaType.APPLICATION_JSON, + consumes = MediaType.APPLICATION_JSON) +@Consumes(MediaType.APPLICATION_JSON) +@Produces(MediaType.APPLICATION_JSON) +public interface TagV1 { + @GET + @Path("tags") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "Returns the tags", + notes = "Returns the tags") + Set<String> getTags(); + + @GET + @Path("list") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "Returns the list of qualified names that are tagged with the given tags. Qualified names will be excluded if the contained tags match the excluded tags", + notes = "Returns the list of qualified names that are tagged with the given tags. Qualified names will be excluded if the contained tags match the excluded tags") + List<QualifiedName> list( + @ApiParam(value = "Set of matching tags", required = false) + @QueryParam("include") + Set<String> includeTags, + @ApiParam(value = "Set of un-matching tags", required = false) + @QueryParam("exclude") + Set<String> excludeTags, + @ApiParam(value = "Prefix of the source name", required = false) + @QueryParam("sourceName") + String sourceName, + @ApiParam(value = "Prefix of the database name", required = false) + @QueryParam("databaseName") + String databaseName, + @ApiParam(value = "Prefix of the table name", required = false) + @QueryParam("tableName") + String tableName + ); + + @GET + @Path("search") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 1, + value = "Returns the list of qualified names that are tagged with tags containing the given tagText", + notes = "Returns the list of qualified names that are tagged with tags containing the given tagText") + List<QualifiedName> search( + @ApiParam(value = "Tag partial text", required = false) + @QueryParam("tag") + String tag, + @ApiParam(value = "Prefix of the source name", required = false) + @QueryParam("sourceName") + String sourceName, + @ApiParam(value = "Prefix of the database name", required = false) + @QueryParam("databaseName") + String databaseName, + @ApiParam(value = "Prefix of the table name", required = false) + @QueryParam("tableName") + String tableName + ); + + @POST + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 2, + value = "Sets the tags on the given table", + notes = "Sets the tags on the given table") + @ApiResponses(value = { + 
@ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + Set<String> setTableTags( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "Set of tags", required = true) + Set<String> tags + ); + + @DELETE + @Path("catalog/{catalog-name}/database/{database-name}/table/{table-name}") + @Consumes(MediaType.APPLICATION_JSON) + @Produces(MediaType.APPLICATION_JSON) + @ApiOperation( + position = 4, + value = "Remove the tags from the given table", + notes = "Remove the tags from the given table") + @ApiResponses(value = { + @ApiResponse(code = HttpURLConnection.HTTP_NOT_FOUND, + message = "The requested catalog or database or table cannot be located" + ) + }) + void removeTableTags( + @ApiParam(value = "The name of the catalog", required = true) + @PathParam("catalog-name") + String catalogName, + @ApiParam(value = "The name of the database", required = true) + @PathParam("database-name") + String databaseName, + @ApiParam(value = "The name of the table", required = true) + @PathParam("table-name") + String tableName, + @ApiParam(value = "True if all tags need to be removed", required = false) + @DefaultValue("false") @QueryParam("all") + Boolean deleteAll, + @ApiParam(value = "Tags to be removed from the given table", required = false) + Set<String> tags + ); +}
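The javadoc on list() above states the selection rule: a qualified name is returned when its tags match the include set and is dropped when any of its tags match the exclude set. A small illustrative sketch of that rule (this is not the server implementation, and the tag values are invented):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class TagFilterSketch {
    // Mirrors the documented rule: keep a name when its tags hit the include set
    // and none of its tags hit the exclude set.
    static boolean selected(Set<String> tags, Set<String> include, Set<String> exclude) {
        boolean included = include.isEmpty();
        for (String tag : tags) {
            if (include.contains(tag)) included = true;
            if (exclude.contains(tag)) return false;
        }
        return included;
    }

    public static void main(String[] args) {
        Set<String> events = new HashSet<>(Arrays.asList("audited", "pii"));
        Set<String> clicks = new HashSet<>(Arrays.asList("audited"));
        Set<String> include = new HashSet<>(Arrays.asList("audited"));
        Set<String> exclude = new HashSet<>(Arrays.asList("pii"));
        System.out.println(selected(events, include, exclude)); // false: "pii" is excluded
        System.out.println(selected(clicks, include, exclude)); // true
    }
}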
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/AuditDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/AuditDto.java new file mode 100644 index 000000000..369ec0010 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/AuditDto.java @@ -0,0 +1,73 @@ +package com.netflix.metacat.common.dto; + +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.util.Date; +import java.util.Objects; + +@SuppressWarnings("unused") +public class AuditDto extends BaseDto { + private static final long serialVersionUID = 9221109874202093789L; + + /* Created By */ + @ApiModelProperty(value = "User name who created the table", required = false) + private String createdBy; + /* Created date */ + @ApiModelProperty(value = "Creation date", required = false) + private Date createdDate; + /* Last modified by */ + @ApiModelProperty(value = "User name who last modified the table", required = false) + private String lastModifiedBy; + /* Last modified date */ + @ApiModelProperty(value = "Last modified date", required = false) + private Date lastModifiedDate; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof AuditDto)) return false; + AuditDto auditDto = (AuditDto) o; + return Objects.equals(createdBy, auditDto.createdBy) && + Objects.equals(createdDate, auditDto.createdDate) && + Objects.equals(lastModifiedBy, auditDto.lastModifiedBy) && + Objects.equals(lastModifiedDate, auditDto.lastModifiedDate); + } + + public String getCreatedBy() { + return createdBy; + } + + public void setCreatedBy(String createdBy) { + this.createdBy = createdBy; + } + + public Date getCreatedDate() { + return createdDate; + } + + public void setCreatedDate(Date createdDate) { + this.createdDate = createdDate; + } + + public String getLastModifiedBy() { + return lastModifiedBy; + } + + public void setLastModifiedBy(String lastModifiedBy) { + this.lastModifiedBy = lastModifiedBy; + } + + public Date getLastModifiedDate() { + return lastModifiedDate; + } + + public void setLastModifiedDate(Date lastModifiedDate) { + this.lastModifiedDate = lastModifiedDate; + } + + @Override + public int hashCode() { + return Objects.hash(createdBy, createdDate, lastModifiedBy, lastModifiedDate); + } + +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/BaseDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/BaseDto.java new file mode 100644 index 000000000..b8d31fcb8 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/BaseDto.java @@ -0,0 +1,28 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.json.MetacatJsonLocator; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; + +public abstract class BaseDto implements Serializable { + @Nullable + public static ObjectNode deserializeObjectNode(@Nonnull ObjectInputStream inputStream) throws IOException { + return MetacatJsonLocator.INSTANCE.deserializeObjectNode(inputStream); + } + + public static void serializeObjectNode(@Nonnull ObjectOutputStream outputStream, @Nullable ObjectNode json) + throws IOException { + MetacatJsonLocator.INSTANCE.serializeObjectNode(outputStream, json); + } + + @Override + public String toString() { + return MetacatJsonLocator.INSTANCE.toJsonString(this); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogDto.java new file mode 100644 index 000000000..4599160a1 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogDto.java @@ -0,0 +1,118 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.List; +import java.util.Objects; + +/** + * Information about a catalog + */ +@ApiModel("Information about a catalog") +@SuppressWarnings("unused") +public class CatalogDto extends BaseDto implements HasDefinitionMetadata { + private static final long serialVersionUID = -5713826608609231492L; + + @ApiModelProperty(value = "a list of the names of the databases that belong to this catalog", required = true) + private List<QualifiedName> databases; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the logical catalog") + @JsonProperty + private transient ObjectNode definitionMetadata; + @ApiModelProperty(value = "the name of this entity", required = true) + @JsonProperty + private QualifiedName name; + @ApiModelProperty(value = "the type of the connector of this catalog", required = true) + private String type; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CatalogDto)) return false; + CatalogDto that = (CatalogDto) 
o; + return Objects.equals(databases, that.databases) && + Objects.equals(definitionMetadata, that.definitionMetadata) && + Objects.equals(name, that.name) && + Objects.equals(type, that.type); + } + + /** + * @return the list of the names of the databases under the catalog + */ + public List<QualifiedName> getDatabases() { + return databases; + } + + /** + * @param databases the list of the names of the databases under this catalog + */ + public void setDatabases(List<QualifiedName> databases) { + this.databases = databases; + } + + @Override + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + @Override + public void setDefinitionMetadata(ObjectNode metadata) { + this.definitionMetadata = metadata; + } + + @JsonIgnore + public QualifiedName getDefinitionName() { + return name; + } + + /** + * @return name of the catalog + */ + public QualifiedName getName() { + return name; + } + + /** + * @param name The name of this catalog + */ + public void setName(QualifiedName name) { + this.name = name; + } + + /** + * @return the name of the connector + */ + public String getType() { + return type; + } + + /** + * @param type the name of the connector used by this catalog + */ + public void setType(String type) { + this.type = type; + } + + @Override + public int hashCode() { + return Objects.hash(databases, definitionMetadata, name, type); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, definitionMetadata); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogMappingDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogMappingDto.java new file mode 100644 index 000000000..d2005b1cd --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/CatalogMappingDto.java @@ -0,0 +1,56 @@ +package com.netflix.metacat.common.dto; + +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +@ApiModel(description = "The name and type of a catalog") +@SuppressWarnings("unused") +public class CatalogMappingDto extends BaseDto { + private static final long serialVersionUID = -1223516438943164936L; + + @ApiModelProperty(value = "The name of the catalog", required = true) + private String catalogName; + @ApiModelProperty(value = "The connector type of the catalog", required = true) + private String connectorName; + + public CatalogMappingDto() { + } + + public CatalogMappingDto(String catalogName, String connectorName) { + this.catalogName = catalogName; + this.connectorName = connectorName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CatalogMappingDto)) return false; + CatalogMappingDto that = (CatalogMappingDto) o; + return Objects.equals(catalogName, that.catalogName) && + Objects.equals(connectorName, that.connectorName); + } + + public String getCatalogName() { + return catalogName; + } + + public void setCatalogName(String catalogName) { + this.catalogName = catalogName; + } + + public String getConnectorName() { + return connectorName; + } + + public void setConnectorName(String connectorName) { + this.connectorName = connectorName; + } + + @Override + public int hashCode() { + return Objects.hash(catalogName, connectorName); + } + +} 
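A pattern worth noting in CatalogDto, and repeated in the DTOs below: the Jackson ObjectNode fields are declared transient and round-tripped by hand through BaseDto's serializeObjectNode/deserializeObjectNode hooks, since ObjectNode is not Java-serializable in the Jackson version used here, while the @JsonProperty annotation keeps the field visible to Jackson. A minimal round-trip sketch of that pattern, with invented values:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.metacat.common.dto.CatalogDto;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

public class DtoRoundTripSketch {
    public static void main(String[] args) throws Exception {
        CatalogDto dto = new CatalogDto();
        dto.setType("hive"); // invented connector type
        dto.setDefinitionMetadata(new ObjectMapper().createObjectNode().put("owner", "jdoe"));

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        ObjectOutputStream out = new ObjectOutputStream(bytes);
        out.writeObject(dto); // CatalogDto.writeObject() serializes the ObjectNode by hand
        out.close();

        ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        CatalogDto copy = (CatalogDto) in.readObject();
        in.close();
        System.out.println(copy.getDefinitionMetadata()); // {"owner":"jdoe"}
    }
}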
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/CreateCatalogDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/CreateCatalogDto.java new file mode 100644 index 000000000..4e4ddff1d --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/CreateCatalogDto.java @@ -0,0 +1,45 @@ +package com.netflix.metacat.common.dto; + +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +/** + * Information required to create a new catalog + */ +@ApiModel("Information required to create a new catalog") +public class CreateCatalogDto extends BaseDto { + private static final long serialVersionUID = -5037037662666608796L; + + @ApiModelProperty(value = "the type of the connector of this catalog", required = true) + private String type; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CreateCatalogDto)) return false; + CreateCatalogDto that = (CreateCatalogDto) o; + return Objects.equals(type, that.type); + } + + /** + * @return the name of the connector + */ + public String getType() { + return type; + } + + /** + * @param type the name of the connector used by this catalog + */ + public void setType(String type) { + this.type = type; + } + + @Override + public int hashCode() { + return Objects.hash(type); + } + +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataDto.java new file mode 100644 index 000000000..f8e383be2 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataDto.java @@ -0,0 +1,72 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** + * Created by amajumdar on 5/28/15. 
+ */ +public class DataMetadataDto extends BaseDto implements HasDataMetadata { + private static final long serialVersionUID = -874750260731085106L; + private String uri; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata") + @JsonProperty + private transient ObjectNode dataMetadata; + + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + public ObjectNode getDataMetadata() { + return dataMetadata; + } + + public void setDataMetadata(ObjectNode dataMetadata) { + this.dataMetadata = dataMetadata; + } + + /** + * @return The uri that points to the location of the external data + * @throws IllegalStateException if this instance does not have external data + */ + @Nonnull + @Override + @JsonIgnore + public String getDataUri() { + return uri; + } + + /** + * @return true if this particular instance points to external data + */ + @Override + public boolean isDataExternal() { + return false; + } + + @SuppressWarnings("EmptyMethod") + public void setDataExternal(boolean ignored) { + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + dataMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, dataMetadata); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataGetRequestDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataGetRequestDto.java new file mode 100644 index 000000000..16dba3369 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DataMetadataGetRequestDto.java @@ -0,0 +1,16 @@ +package com.netflix.metacat.common.dto; + +/** + * Created by amajumdar on 6/26/15. + */ +public class DataMetadataGetRequestDto extends BaseDto { + private String uri; + + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseCreateRequestDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseCreateRequestDto.java new file mode 100644 index 000000000..627bcf7e5 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseCreateRequestDto.java @@ -0,0 +1,38 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** + * Created by amajumdar on 5/5/15. 
+ */ +public class DatabaseCreateRequestDto extends BaseDto { + private static final long serialVersionUID = 6308417213106650174L; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the physical data") + @JsonProperty + private transient ObjectNode definitionMetadata; + + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + public void setDefinitionMetadata(ObjectNode definitionMetadata) { + this.definitionMetadata = definitionMetadata; + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, definitionMetadata); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseDto.java new file mode 100644 index 000000000..81894f330 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DatabaseDto.java @@ -0,0 +1,140 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +@ApiModel("Tables and other information about the given database") +@SuppressWarnings("unused") +public class DatabaseDto extends BaseDto implements HasDefinitionMetadata { + private static final long serialVersionUID = -4530516372664788451L; + private Date dateCreated; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the logical database") + @JsonProperty + private transient ObjectNode definitionMetadata; + private Date lastUpdated; + @ApiModelProperty(value = "the name of this entity", required = true) + @JsonProperty + private QualifiedName name; + @ApiModelProperty(value = "Names of the tables in this database", required = true) + private List<String> tables; + @ApiModelProperty(value = "Connector type of this catalog", required = true) + private String type; + @ApiModelProperty(value = "Any extra metadata properties of the database", required = false) + private Map<String, String> metadata; + @ApiModelProperty(value = "URI of the database. 
Only applies to certain data sources like hive, S3", required = false) + private String uri; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof DatabaseDto)) return false; + DatabaseDto that = (DatabaseDto) o; + return Objects.equals(definitionMetadata, that.definitionMetadata) && + Objects.equals(name, that.name) && + Objects.equals(tables, that.tables) && + Objects.equals(type, that.type) && + Objects.equals(lastUpdated, that.lastUpdated) && + Objects.equals(dateCreated, that.dateCreated) && + Objects.equals(metadata, that.metadata) && + Objects.equals(uri, that.uri); + } + + public Date getDateCreated() { + return dateCreated; + } + + public void setDateCreated(Date dateCreated) { + this.dateCreated = dateCreated; + } + + @Override + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + @Override + public void setDefinitionMetadata(ObjectNode metadata) { + this.definitionMetadata = metadata; + } + + @JsonIgnore + public QualifiedName getDefinitionName() { + return name; + } + + public Date getLastUpdated() { + return lastUpdated; + } + + public void setLastUpdated(Date lastUpdated) { + this.lastUpdated = lastUpdated; + } + + public QualifiedName getName() { + return name; + } + + public void setName(QualifiedName name) { + this.name = name; + } + + public List<String> getTables() { + return tables; + } + + public void setTables(List<String> tables) { + this.tables = tables; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public Map<String, String> getMetadata() { + return metadata; + } + + public void setMetadata(Map<String, String> metadata) { + this.metadata = metadata; + } + + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @Override + public int hashCode() { + return Objects.hash(definitionMetadata, name, tables, type, lastUpdated, dateCreated, metadata, uri); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, definitionMetadata); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/DefinitionMetadataDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DefinitionMetadataDto.java new file mode 100644 index 000000000..47a00d0d9 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/DefinitionMetadataDto.java @@ -0,0 +1,57 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +/** + * Created by amajumdar on 5/28/15. 
+ */ +public class DefinitionMetadataDto extends BaseDto implements HasDefinitionMetadata { + private static final long serialVersionUID = 3826462875655878L; + private QualifiedName name; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata") + @JsonProperty + private transient ObjectNode definitionMetadata; + + public QualifiedName getName() { + return name; + } + + public void setName(QualifiedName name) { + this.name = name; + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, definitionMetadata); + } + + @Override + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + @Override + public void setDefinitionMetadata(ObjectNode definitionMetadata) { + this.definitionMetadata = definitionMetadata; + } + + @Override + @JsonIgnore + public QualifiedName getDefinitionName() { + return name; + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/FieldDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/FieldDto.java new file mode 100644 index 000000000..a799ae234 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/FieldDto.java @@ -0,0 +1,146 @@ +package com.netflix.metacat.common.dto; + +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +@ApiModel(value = "Table field/column metadata") +@SuppressWarnings("unused") +public class FieldDto extends BaseDto { + private static final long serialVersionUID = 9096928516299407324L; + + @ApiModelProperty(value = "Comment of the field/column", required = false) + private String comment; + @ApiModelProperty(value = "Name of the field/column", required = true) + private String name; + @ApiModelProperty(value = "Whether the field is a partition key", required = false) + private boolean partition_key; + @ApiModelProperty(value = "Position of the field/column", required = true) + private Integer pos; + @ApiModelProperty(value = "Source type of the field/column", required = false) + private String source_type; + @ApiModelProperty(value = "Type of the field/column", required = true) + private String type; + @ApiModelProperty(value = "Can the field/column be null", required = false) + private Boolean isNullable; + @ApiModelProperty(value = "Size of the field/column", required = false) + private Integer size; + @ApiModelProperty(value = "Default value of the column", required = false) + private String defaultValue; + @ApiModelProperty(value = "Is the column a sorted key", required = false) + private Boolean isSortKey; + @ApiModelProperty(value = "Is the column an index key", required = false) + private Boolean isIndexKey; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof FieldDto)) return false; + FieldDto fieldDto = (FieldDto) o; + return Objects.equals(partition_key, fieldDto.partition_key) && + Objects.equals(pos, fieldDto.pos) && + Objects.equals(comment, fieldDto.comment) && + Objects.equals(name, fieldDto.name) && + Objects.equals(source_type, fieldDto.source_type) && + Objects.equals(type, fieldDto.type) && + Objects.equals(isNullable, fieldDto.isNullable) && + Objects.equals(size, fieldDto.size) && + Objects.equals(defaultValue, fieldDto.defaultValue) && + Objects.equals(isSortKey, fieldDto.isSortKey) && + Objects.equals(isIndexKey, fieldDto.isIndexKey); + } + + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Integer getPos() { + return pos; + } + + public void setPos(Integer pos) { + this.pos = pos; + } + + public String getSource_type() { + return source_type; + } + + public void setSource_type(String source_type) { + this.source_type = source_type; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + @Override + public int hashCode() { + return Objects.hash(comment, name, partition_key, pos, source_type, type, isNullable, size, defaultValue, isSortKey, isIndexKey); + } + + public boolean isPartition_key() { + return partition_key; + } + + public void setPartition_key(boolean partition_key) { + this.partition_key = partition_key; + } + + public Boolean getIsNullable() { + return isNullable; + } + + public void setIsNullable(Boolean isNullable) { + this.isNullable = isNullable; + } + + public Integer getSize() { + return size; + } + + public void setSize(Integer size) { + this.size = size; + } + + public String getDefaultValue() { + return defaultValue; + } + + public void setDefaultValue(String defaultValue) { + this.defaultValue = defaultValue; + } + + public Boolean getIsSortKey() { + return isSortKey; + } + + public void setIsSortKey(Boolean isSortKey) { + this.isSortKey = isSortKey; + } + + public Boolean getIsIndexKey() { + return isIndexKey; + } + + public void setIsIndexKey(Boolean isIndexKey) { + this.isIndexKey = isIndexKey; + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/GetPartitionsRequestDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/GetPartitionsRequestDto.java new file mode 100644 index 000000000..c21a00081 --- 
/dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/GetPartitionsRequestDto.java @@ -0,0 +1,36 @@ +package com.netflix.metacat.common.dto; + +import java.util.List; + +/** + * Created by amajumdar on 5/28/15. + */ +public class GetPartitionsRequestDto extends BaseDto { + String filter; + List<String> partitionNames; + Boolean includePartitionDetails = false; + + public String getFilter() { + return filter; + } + + public void setFilter(String filter) { + this.filter = filter; + } + + public List<String> getPartitionNames() { + return partitionNames; + } + + public void setPartitionNames(List<String> partitionNames) { + this.partitionNames = partitionNames; + } + + public Boolean getIncludePartitionDetails() { + return includePartitionDetails != null ? includePartitionDetails : false; + } + + public void setIncludePartitionDetails(Boolean includePartitionDetails) { + this.includePartitionDetails = includePartitionDetails; + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDataMetadata.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDataMetadata.java new file mode 100644 index 000000000..9af6bca78 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDataMetadata.java @@ -0,0 +1,26 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.databind.node.ObjectNode; + +import javax.annotation.Nonnull; + +/** + * Marker interface for objects with data metadata + */ +public interface HasDataMetadata extends HasMetadata { + ObjectNode getDataMetadata(); + + void setDataMetadata(ObjectNode metadata); + + /** + * @return The uri that points to the location of the external data + * @throws IllegalStateException if this instance does not have external data + */ + @Nonnull + String getDataUri(); + + /** + * @return true if this particular instance points to external data + */ + boolean isDataExternal(); +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDefinitionMetadata.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDefinitionMetadata.java new file mode 100644 index 000000000..7e2d6dc6a --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasDefinitionMetadata.java @@ -0,0 +1,15 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; + +/** + * Marker interface for objects with definition metadata + */ +public interface HasDefinitionMetadata extends HasMetadata { + ObjectNode getDefinitionMetadata(); + + void setDefinitionMetadata(ObjectNode metadata); + + QualifiedName getDefinitionName(); +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasMetadata.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasMetadata.java new file mode 100644 index 000000000..163b70bd1 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/HasMetadata.java @@ -0,0 +1,9 @@ +package com.netflix.metacat.common.dto; + +import java.io.Serializable; + +/** + * Marker interface for objects with metadata + */ +public interface HasMetadata extends Serializable { +}
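GetPartitionsRequestDto above is the POST body for the keys-request and uris-request endpoints earlier in this change; it lets a long filter expression travel in the request body rather than the query string. A sketch of building one (the filter expression is an invented example; its grammar is connector-specific):

import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import java.util.Arrays;

public class PartitionRequestSketch {
    public static void main(String[] args) {
        GetPartitionsRequestDto request = new GetPartitionsRequestDto();
        // Hypothetical filter expression; the supported syntax depends on the connector.
        request.setFilter("dateint >= 20150101 AND dateint < 20150201");
        // Optionally pin down exact partition names instead of (or as well as) a filter.
        request.setPartitionNames(Arrays.asList("dateint=20150101", "dateint=20150102"));
        request.setIncludePartitionDetails(true);
        // BaseDto.toString() renders the DTO as JSON, which is what gets POSTed.
        System.out.println(request);
    }
}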
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionDto.java new file mode 100644 index 000000000..2df4e5ce2 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionDto.java @@ -0,0 +1,145 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.Map; +import java.util.Objects; + +@SuppressWarnings("unused") +public class PartitionDto extends BaseDto implements HasDataMetadata, HasDefinitionMetadata { + private static final long serialVersionUID = 783462697901395508L; + @ApiModelProperty(value = "audit information about the partition") + private AuditDto audit; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to this partition") + @JsonProperty + private transient ObjectNode dataMetadata; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the physical data") + @JsonProperty + private transient ObjectNode definitionMetadata; + @ApiModelProperty(value = "the name of this entity", required = true) + @JsonProperty + private QualifiedName name; + @ApiModelProperty(value = "Storage/Serialization/Deserialization info of the partition") + private StorageDto serde; + @ApiModelProperty(value = "Any extra metadata properties of the partition", required = false) + private Map<String, String> metadata; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof PartitionDto)) return false; + PartitionDto that = (PartitionDto) o; + return Objects.equals(audit, that.audit) && + Objects.equals(dataMetadata, that.dataMetadata) && + Objects.equals(definitionMetadata, that.definitionMetadata) && + Objects.equals(name, that.name) && + Objects.equals(serde, that.serde) && + Objects.equals(metadata, that.metadata); + } + + public AuditDto getAudit() { + return audit; + } + + public void setAudit(AuditDto audit) { + this.audit = audit; + } + + @Override + public ObjectNode getDataMetadata() { + return dataMetadata; + } + + @Override + public void setDataMetadata(ObjectNode metadata) { + this.dataMetadata = metadata; + } + + @Nonnull + @Override + @JsonIgnore + public String getDataUri() { + String uri = serde != null ? 
serde.getUri() : null; + if (uri == null || uri.isEmpty()) { + throw new IllegalStateException("This instance does not have external data"); + } + + return uri; + } + + @Override + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + @Override + public void setDefinitionMetadata(ObjectNode metadata) { + this.definitionMetadata = metadata; + } + + public QualifiedName getName() { + return name; + } + + public void setName(QualifiedName name) { + this.name = name; + } + + @JsonIgnore + public QualifiedName getDefinitionName() { + return name; + } + + public StorageDto getSerde() { + return serde; + } + + public void setSerde(StorageDto serde) { + this.serde = serde; + } + + public Map<String, String> getMetadata() { + return metadata; + } + + public void setMetadata(Map<String, String> metadata) { + this.metadata = metadata; + } + + @Override + public int hashCode() { + return Objects.hash(audit, dataMetadata, definitionMetadata, name, serde, metadata); + } + + @Override + @JsonProperty + public boolean isDataExternal() { + return serde != null && serde.getUri() != null && !serde.getUri().isEmpty(); + } + + @SuppressWarnings("EmptyMethod") + public void setDataExternal(boolean ignored) { + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + dataMetadata = deserializeObjectNode(in); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, dataMetadata); + serializeObjectNode(out, definitionMetadata); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveRequestDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveRequestDto.java new file mode 100644 index 000000000..ef9f4bd88 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveRequestDto.java @@ -0,0 +1,85 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.wordnik.swagger.annotations.ApiModelProperty; +import com.wordnik.swagger.annotations.ApiParam; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.List; + +/** + * Created by amajumdar on 5/4/15. + */ +public class PartitionsSaveRequestDto extends BaseDto { + private static final long serialVersionUID = -5922699691074685961L; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to this table") + @JsonProperty + private transient ObjectNode dataMetadata; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the physical data") + @JsonProperty + private transient ObjectNode definitionMetadata; + // List of partitions + @ApiParam(value = "List of partitions", required = true) + private List<PartitionDto> partitions; + // List of partition ids/names for deletes + private List<String> partitionIdsForDeletes; + // If true, we check if partition exists and drop it before adding it back. If false, we do not check and just add. 
+ private Boolean checkIfExists = true; + + public ObjectNode getDataMetadata() { + return dataMetadata; + } + + public void setDataMetadata(ObjectNode dataMetadata) { + this.dataMetadata = dataMetadata; + } + + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + public void setDefinitionMetadata(ObjectNode definitionMetadata) { + this.definitionMetadata = definitionMetadata; + } + + public List<PartitionDto> getPartitions() { + return partitions; + } + + public void setPartitions(List<PartitionDto> partitions) { + this.partitions = partitions; + } + + public List<String> getPartitionIdsForDeletes() { + return partitionIdsForDeletes; + } + + public void setPartitionIdsForDeletes(List<String> partitionNamesForDeletes) { + this.partitionIdsForDeletes = partitionNamesForDeletes; + } + + public Boolean getCheckIfExists() { + return checkIfExists; + } + + public void setCheckIfExists(Boolean checkIfExists) { + this.checkIfExists = checkIfExists; + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + dataMetadata = deserializeObjectNode(in); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, dataMetadata); + serializeObjectNode(out, definitionMetadata); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveResponseDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveResponseDto.java new file mode 100644 index 000000000..e06312b65 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/PartitionsSaveResponseDto.java @@ -0,0 +1,37 @@ +package com.netflix.metacat.common.dto; + +import java.util.ArrayList; +import java.util.List; + +/** + * Created by amajumdar on 5/28/15. + */ +public class PartitionsSaveResponseDto extends BaseDto { + List<String> added; + List<String> updated; + + public PartitionsSaveResponseDto() { + added = new ArrayList<>(); + updated = new ArrayList<>(); + } + + public List<String> getAdded() { + return added; + } + + public void setAdded(List<String> added) { + if (added != null) { + this.added = added; + } + } + + public List<String> getUpdated() { + return updated; + } + + public void setUpdated(List<String> updated) { + if (updated != null) { + this.updated = updated; + } + } +}
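A sketch of assembling the request consumed by the savePartitions endpoints above; all names and URIs are invented, and PartitionDto/StorageDto are the DTOs defined in this change:

import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.StorageDto;
import java.util.Arrays;

public class SavePartitionsSketch {
    public static void main(String[] args) {
        StorageDto serde = new StorageDto();
        serde.setUri("s3://example-bucket/events/dateint=20150101"); // invented location

        PartitionDto partition = new PartitionDto();
        partition.setSerde(serde);

        PartitionsSaveRequestDto request = new PartitionsSaveRequestDto();
        request.setPartitions(Arrays.asList(partition));
        // Skip the exists-check-and-drop pass when the caller knows the partitions are new.
        request.setCheckIfExists(false);
        // Partitions can be dropped in the same call.
        request.setPartitionIdsForDeletes(Arrays.asList("dateint=20141231"));
        // POSTed to catalog/{catalog-name}/database/{database-name}/table/{table-name};
        // the PartitionsSaveResponseDto reports which partition ids were added vs updated.
    }
}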
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/SortOrder.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/SortOrder.java new file mode 100644 index 000000000..4ea225e3c --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/SortOrder.java @@ -0,0 +1,5 @@ +package com.netflix.metacat.common.dto; + +public enum SortOrder { + ASC, DESC +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/StorageDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/StorageDto.java new file mode 100644 index 000000000..0560f5d10 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/StorageDto.java @@ -0,0 +1,116 @@ +package com.netflix.metacat.common.dto; + +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import java.util.Map; +import java.util.Objects; + +/** + * <pre>
+ {
+ "inputFormat": "org.apache.hadoop.mapred.TextInputFormat",
+ "outputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
+ "serializationLib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
+ "parameters": {
+ "serialization.format": "1"
+ },
+ "owner": "charsmith"
+ }
+ * </pre>
+ */ +@ApiModel("Serialization/Deserialization metadata of the table data") +@SuppressWarnings("unused") +public class StorageDto extends BaseDto { + private static final long serialVersionUID = 4933906340321707232L; + + @ApiModelProperty(value = "Input format of the table data stored", required = false) + private String inputFormat; + @ApiModelProperty(value = "Output format of the table data stored", required = false) + private String outputFormat; + @ApiModelProperty(value = "Owner of the table", required = false) + private String owner; + @ApiModelProperty(value = "Extra storage parameters", required = false) + private Map<String, String> parameters; + @ApiModelProperty(value = "Extra serde info parameters", required = false) + private Map<String, String> serdeInfoParameters; + @ApiModelProperty(value = "Serialization library of the data", required = false) + private String serializationLib; + @ApiModelProperty(value = "URI of the table. Only applies to certain data sources like hive, S3", required = false) + private String uri; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof StorageDto)) return false; + StorageDto that = (StorageDto) o; + return Objects.equals(inputFormat, that.inputFormat) && + Objects.equals(outputFormat, that.outputFormat) && + Objects.equals(owner, that.owner) && + Objects.equals(parameters, that.parameters) && + Objects.equals(serdeInfoParameters, that.serdeInfoParameters) && + Objects.equals(serializationLib, that.serializationLib) && + Objects.equals(uri, that.uri); + } + + public String getInputFormat() { + return inputFormat; + } + + public void setInputFormat(String inputFormat) { + this.inputFormat = inputFormat; + } + + public String getOutputFormat() { + return outputFormat; + } + + public void setOutputFormat(String outputFormat) { + this.outputFormat = outputFormat; + } + + public String getOwner() { + return owner; + } + + public void setOwner(String owner) { + this.owner = owner; + } + + public Map<String, String> getParameters() { + return parameters; + } + + public void setParameters(Map<String, String> parameters) { + this.parameters = parameters; + } + + public Map<String, String> getSerdeInfoParameters() { + return serdeInfoParameters; + } + + public void setSerdeInfoParameters(Map<String, String> serdeInfoParameters) { + this.serdeInfoParameters = serdeInfoParameters; + } + + public String getSerializationLib() { + return serializationLib; + } + + public void setSerializationLib(String serializationLib) { + this.serializationLib = serializationLib; + } + + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @Override + public int hashCode() { + return Objects.hash(inputFormat, outputFormat, owner, parameters, serdeInfoParameters, serializationLib, uri); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/dto/TableDto.java b/metacat-common/src/main/java/com/netflix/metacat/common/dto/TableDto.java new file mode 100644 index 000000000..7456ceca7 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/dto/TableDto.java @@ -0,0 +1,182 @@ +package com.netflix.metacat.common.dto; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.wordnik.swagger.annotations.ApiModel; +import com.wordnik.swagger.annotations.ApiModelProperty; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.io.ObjectInputStream; 
+import java.io.ObjectOutputStream; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +@ApiModel("Table metadata") +@SuppressWarnings("unused") +public class TableDto extends BaseDto implements HasDataMetadata, HasDefinitionMetadata { + private static final long serialVersionUID = 5922768252406041451L; + @ApiModelProperty(value = "Contains information about table changes") + private AuditDto audit; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the physical data") + @JsonProperty + private transient ObjectNode dataMetadata; + // Marked as transient because we serialize it manually, however as a JsonProperty because Jackson does serialize it + @ApiModelProperty(value = "metadata attached to the logical table") + @JsonProperty + private transient ObjectNode definitionMetadata; + private List<FieldDto> fields; + @ApiModelProperty(value = "Any extra metadata properties of the database table") + private Map<String, String> metadata; + private QualifiedName name; + @ApiModelProperty(value = "serialization/deserialization info about the table") + private StorageDto serde; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof TableDto)) return false; + TableDto tableDto = (TableDto) o; + return Objects.equals(audit, tableDto.audit) && + Objects.equals(dataMetadata, tableDto.dataMetadata) && + Objects.equals(definitionMetadata, tableDto.definitionMetadata) && + Objects.equals(fields, tableDto.fields) && + Objects.equals(metadata, tableDto.metadata) && + Objects.equals(name, tableDto.name) && + Objects.equals(serde, tableDto.serde); + } + + public AuditDto getAudit() { + return audit; + } + + public void setAudit(AuditDto audit) { + this.audit = audit; + } + + @Override + public ObjectNode getDataMetadata() { + return dataMetadata; + } + + @Override + public void setDataMetadata(ObjectNode dataMetadata) { + this.dataMetadata = dataMetadata; + } + + @Nonnull + @Override + @JsonIgnore + public String getDataUri() { + String uri = serde != null ? 
serde.getUri() : null; + if (uri == null || uri.isEmpty()) { + throw new IllegalStateException("This instance does not have external data"); + } + + return uri; + } + + @Override + public ObjectNode getDefinitionMetadata() { + return definitionMetadata; + } + + @Override + public void setDefinitionMetadata(ObjectNode definitionMetadata) { + this.definitionMetadata = definitionMetadata; + } + + public List<FieldDto> getFields() { + return fields; + } + + public void setFields(List<FieldDto> fields) { + this.fields = fields; + } + + public Map<String, String> getMetadata() { + return metadata; + } + + public void setMetadata(Map<String, String> metadata) { + this.metadata = metadata; + } + + @ApiModelProperty(value = "the name of this entity", required = true) + @JsonProperty + public QualifiedName getName() { + return name; + } + + public void setName(QualifiedName name) { + this.name = name; + } + + @JsonIgnore + public QualifiedName getDefinitionName() { + return name; + } + + @ApiModelProperty(value = "List of partition key names", required = false) + @JsonProperty + public List<String> getPartition_keys() { + if (fields == null) { + return null; + } else if (fields.isEmpty()) { + return Collections.emptyList(); + } + + List<String> keys = new LinkedList<>(); + for (FieldDto field : fields) { + if (field.isPartition_key()) { + keys.add(field.getName()); + } + } + return keys; + } + + @SuppressWarnings("EmptyMethod") + public void setPartition_keys(List<String> ignored) { + } + + public StorageDto getSerde() { + return serde; + } + + public void setSerde(StorageDto serde) { + this.serde = serde; + } + + @Override + public int hashCode() { + return Objects.hash(audit, dataMetadata, definitionMetadata, fields, metadata, name, serde); + } + + @Override + @JsonProperty + public boolean isDataExternal() { + return serde != null && serde.getUri() != null && !serde.getUri().isEmpty(); + } + + @SuppressWarnings("EmptyMethod") + public void setDataExternal(boolean ignored) { + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + dataMetadata = deserializeObjectNode(in); + definitionMetadata = deserializeObjectNode(in); + } + + private void writeObject(ObjectOutputStream out) throws IOException { + out.defaultWriteObject(); + serializeObjectNode(out, dataMetadata); + serializeObjectNode(out, definitionMetadata); + } +}
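Note that TableDto.getPartition_keys() above is derived rather than stored: it walks the field list and collects the names of fields flagged as partition keys. A small sketch, with invented column names:

import com.netflix.metacat.common.dto.FieldDto;
import com.netflix.metacat.common.dto.TableDto;
import java.util.Arrays;

public class PartitionKeysSketch {
    public static void main(String[] args) {
        FieldDto dateint = new FieldDto();
        dateint.setName("dateint");
        dateint.setPartition_key(true); // flagged, so it is reported as a partition key

        FieldDto payload = new FieldDto();
        payload.setName("payload"); // regular column, not a partition key

        TableDto table = new TableDto();
        table.setFields(Arrays.asList(dateint, payload));
        System.out.println(table.getPartition_keys()); // [dateint]
    }
}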
+ */ +public class MetacatAlreadyExistsException extends MetacatException{ + public MetacatAlreadyExistsException(String message) { + super(message, Response.Status.CONFLICT, null); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatBadRequestException.java b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatBadRequestException.java new file mode 100644 index 000000000..998c353c3 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatBadRequestException.java @@ -0,0 +1,12 @@ +package com.netflix.metacat.common.exception; + +import javax.ws.rs.core.Response; + +/** + * TODO: This should be replaced by a BadRequestException from JAX-RS 2.x once we support the newer JAX-RS version. + */ +public class MetacatBadRequestException extends MetacatException { + public MetacatBadRequestException(String reason) { + super(reason, Response.Status.BAD_REQUEST, null); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatException.java b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatException.java new file mode 100644 index 000000000..93177612e --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatException.java @@ -0,0 +1,94 @@ +package com.netflix.metacat.common.exception; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.json.MetacatJson; +import com.netflix.metacat.common.json.MetacatJsonLocator; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +/** + * Created by amajumdar on 3/27/15. + */ +public class MetacatException extends WebApplicationException { + private static final MetacatJson metacatJson = MetacatJsonLocator.INSTANCE; + private static final ObjectNode EMPTY_ERROR = metacatJson.emptyObjectNode().put("error", ""); + + /** + * Construct a new client error exception. + * + * @param status client error status. Must be a {@code 4xx} status code. + * @throws IllegalArgumentException in case the status code is {@code null} or is not from + * {@link javax.ws.rs.core.Response.Status.Family#CLIENT_ERROR} status code + * family. + */ + public MetacatException(Response.Status status) { + this(Response.status(status).type(MediaType.APPLICATION_JSON_TYPE).entity(EMPTY_ERROR).build(), null); + } + + /** + * Construct a new client error exception. + * + * @param status client error status. Must be a {@code 4xx} status code. + * @throws IllegalArgumentException in case the status code is not a valid HTTP status code or + * if it is not from the {@link javax.ws.rs.core.Response.Status.Family#CLIENT_ERROR} + * status code family. + */ + public MetacatException(int status) { + this(Response.status(status).type(MediaType.APPLICATION_JSON_TYPE).entity(EMPTY_ERROR).build(), null); + } + + /** + * Construct a new client error exception. + * + * @param message the detail message (which is saved for later retrieval + * by the {@link #getMessage()} method). + * @param status client error status. Must be a {@code 4xx} status code. + * @param cause the underlying cause of the exception. + * @throws IllegalArgumentException in case the status code is {@code null} or is not from + * {@link javax.ws.rs.core.Response.Status.Family#CLIENT_ERROR} status code + * family. 
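+     * <p>A typical use, mirroring the subclasses in this package (the message shown is illustrative):
+     * <pre>
+     * throw new MetacatException("The table already exists", Response.Status.CONFLICT, null);
+     * </pre>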
+ */ + public MetacatException(String message, Response.Status status, Throwable cause) { + this( + Response.status(status) + .type(MediaType.APPLICATION_JSON_TYPE) + .entity(metacatJson.emptyObjectNode().put("error", message)) + .build(), + cause + ); + } + + /** + * Construct a new client error exception. + * + * @param message the detail message (which is saved for later retrieval + * by the {@link #getMessage()} method). + * @param status client error status. Must be a {@code 4xx} status code. + * @param cause the underlying cause of the exception. + * @throws IllegalArgumentException in case the status code is not a valid HTTP status code or + * if it is not from the {@link javax.ws.rs.core.Response.Status.Family#CLIENT_ERROR} + * status code family. + */ + public MetacatException(String message, int status, Throwable cause) { + this(Response.status(status) + .type(MediaType.APPLICATION_JSON_TYPE) + .entity(metacatJson.emptyObjectNode().put("error", message)) + .build(), + cause); + } + + /** + * Construct a new client error exception. + * + * @param response client error response. Must have a status code set to a {@code 4xx} + * status code. + * @param cause the underlying cause of the exception. + * @throws IllegalArgumentException in case the response status code is not from the + * {@link javax.ws.rs.core.Response.Status.Family#CLIENT_ERROR} status code family. + */ + public MetacatException(Response response, Throwable cause) { + super(cause, response); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotFoundException.java b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotFoundException.java new file mode 100644 index 000000000..86e6e25e6 --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotFoundException.java @@ -0,0 +1,12 @@ +package com.netflix.metacat.common.exception; + +import javax.ws.rs.core.Response; + +/** + * TODO: This should be replaced by a NotFoundException from JAX-RS 2.x once we support the newer JAX-RS version. + */ +public class MetacatNotFoundException extends MetacatException { + public MetacatNotFoundException(String message) { + super(message, Response.Status.NOT_FOUND, null); + } +} diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotSupportedException.java b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotSupportedException.java new file mode 100644 index 000000000..2617ca23e --- /dev/null +++ b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatNotSupportedException.java @@ -0,0 +1,16 @@ +package com.netflix.metacat.common.exception; + +import javax.ws.rs.core.Response; + +/** + * Created by amajumdar on 3/30/15. 
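+ * <p>Signals an operation or representation that metacat does not support; the constructors
+ * below map it to HTTP 415 (UNSUPPORTED_MEDIA_TYPE).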
+ */
+public class MetacatNotSupportedException extends MetacatException {
+    public MetacatNotSupportedException() {
+        this(null);
+    }
+
+    public MetacatNotSupportedException(String message) {
+        super(message, Response.Status.UNSUPPORTED_MEDIA_TYPE, null);
+    }
+}
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatUserMetadataException.java b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatUserMetadataException.java
new file mode 100644
index 000000000..ce914f47b
--- /dev/null
+++ b/metacat-common/src/main/java/com/netflix/metacat/common/exception/MetacatUserMetadataException.java
@@ -0,0 +1,12 @@
+package com.netflix.metacat.common.exception;
+
+import javax.ws.rs.core.Response;
+
+/**
+ * TODO: This should be replaced by a BadRequestException from JAX-RS 2.x once we support the newer JAX-RS version.
+ */
+public class MetacatUserMetadataException extends MetacatException {
+    public MetacatUserMetadataException(String reason) {
+        super(reason, Response.Status.SEE_OTHER, null);
+    }
+}
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJson.java b/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJson.java
new file mode 100644
index 000000000..894b97571
--- /dev/null
+++ b/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJson.java
@@ -0,0 +1,76 @@
+package com.netflix.metacat.common.json;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+
+public interface MetacatJson {
+    /**
+     * Convenience method for doing a two-step conversion from a given value into an
+     * instance of the given value type. This is functionally equivalent to first
+     * serializing the given value into JSON, then binding the JSON data into a value
+     * of the given type, but may be executed without fully serializing into
+     * JSON. The same converters (serializers, deserializers) will be used as for
+     * data binding, meaning the same object mapper configuration works.
+     *
+     * @throws MetacatJsonException If conversion fails due to an incompatible type;
+     *                              if so, the root cause will contain the underlying checked exception the data
+     *                              binding functionality threw
+     */
+    <T> T convertValue(Object fromValue, Class<T> toValueType);
+
+    /**
+     * A helper for implementing Serializable. Reads a boolean from the inputStream to determine whether the next
+     * object is a json object; if it is, reads it and returns an ObjectNode.
+     *
+     * @param inputStream the serialization input stream
+     * @return a json object if one is the next object, otherwise null
+     * @throws IOException on an error reading from the stream or a json serialization error
+     */
+    @Nullable
+    ObjectNode deserializeObjectNode(@Nonnull ObjectInputStream inputStream) throws IOException;
+
+    /**
+     * @return an empty object node
+     */
+    ObjectNode emptyObjectNode();
+
+    /**
+     * @return The default ObjectMapper used by this instance.
+     */
+    ObjectMapper getObjectMapper();
+
+    /**
+     * @return The default ObjectMapper used by this instance configured to pretty print.
+     */
+    ObjectMapper getPrettyObjectMapper();
+
+    void mergeIntoPrimary(@Nonnull ObjectNode primary, @Nonnull ObjectNode additional);
+
+    /**
+     * Parses the given string as json and returns an ObjectNode representing the json. Assumes the json
+     * represents a json object.
+     *
+     * @param s a string representing a json object
+     * @return an object node representation of the string
+     * @throws MetacatJsonException if unable to convert the string to json or the json isn't a json object
+     */
+    ObjectNode parseJsonObject(String s);
+
+    <T> T parseJsonValue(String s, Class<T> clazz);
+
+    <T> T parseJsonValue(byte[] s, Class<T> clazz);
+
+    void serializeObjectNode(@Nonnull ObjectOutputStream outputStream, @Nullable ObjectNode json) throws IOException;
+
+    byte[] toJsonAsBytes(Object o);
+
+    ObjectNode toJsonObject(Object o);
+
+    String toJsonString(Object o);
+}
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonException.java b/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonException.java
new file mode 100644
index 000000000..9746da85b
--- /dev/null
+++ b/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonException.java
@@ -0,0 +1,24 @@
+package com.netflix.metacat.common.json;
+
+public class MetacatJsonException extends RuntimeException {
+    public MetacatJsonException(String s) {
+        super(s);
+    }
+
+    protected MetacatJsonException(String message, Throwable cause, boolean enableSuppression,
+            boolean writableStackTrace) {
+        super(message, cause, enableSuppression, writableStackTrace);
+    }
+
+    public MetacatJsonException(Throwable cause) {
+        super(cause);
+    }
+
+    public MetacatJsonException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public MetacatJsonException() {
+        super();
+    }
+}
diff --git a/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonLocator.java b/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonLocator.java
new file mode 100644
index 000000000..df2b7b194
--- /dev/null
+++ b/metacat-common/src/main/java/com/netflix/metacat/common/json/MetacatJsonLocator.java
@@ -0,0 +1,176 @@
+package com.netflix.metacat.common.json;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.Iterator;
+import java.util.Map;
+
+public enum MetacatJsonLocator implements MetacatJson {
+    INSTANCE;
+
+    private final ObjectMapper objectMapper;
+    private final ObjectMapper prettyObjectMapper;
+
+    MetacatJsonLocator() {
+        objectMapper = new ObjectMapper()
+                .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
+                .setSerializationInclusion(JsonInclude.Include.ALWAYS);
+
+        prettyObjectMapper = objectMapper.copy().configure(SerializationFeature.INDENT_OUTPUT, true);
+    }
+
+    @Override
+    public <T> T convertValue(Object fromValue, Class<T> toValueType) throws IllegalArgumentException {
+        return objectMapper.convertValue(fromValue, toValueType);
+    }
+
+    @Override
+    @Nullable
+    public ObjectNode deserializeObjectNode(@Nonnull ObjectInputStream inputStream) throws IOException {
+        boolean exists = inputStream.readBoolean();
+
+        ObjectNode json = null;
+        if (exists) {
+            String s = inputStream.readUTF();
+            json = (ObjectNode) objectMapper.readTree(s);
+        }
+
+        return json;
+    }
+
+    @Override
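+    // Note: deserializeObjectNode (above) and serializeObjectNode (below) frame the node as a
+    // presence boolean followed, when present, by the node written via writeUTF. A minimal
+    // round-trip sketch (assuming out/in are ObjectOutputStream/ObjectInputStream over the same bytes):
+    //   MetacatJsonLocator.INSTANCE.serializeObjectNode(out, node);              // writeBoolean(true) + writeUTF("{...}")
+    //   ObjectNode copy = MetacatJsonLocator.INSTANCE.deserializeObjectNode(in); // readBoolean() then readTree(readUTF())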
+    public ObjectNode emptyObjectNode() {
+        return objectMapper.createObjectNode();
+    }
+
+    @Override
+    public ObjectMapper getObjectMapper() {
+        return objectMapper;
+    }
+
+    @Override
+    public ObjectMapper getPrettyObjectMapper() {
+        return prettyObjectMapper;
+    }
+
+    @Override
+    public void mergeIntoPrimary(@Nonnull ObjectNode primary, @Nonnull ObjectNode additional) {
+        try {
+            recursiveMerge(primary, additional);
+        } catch (MetacatJsonException e) {
+            throw new IllegalArgumentException("Unable to merge '" + additional + "' into '" + primary + "'", e);
+        }
+    }
+
+    @Override
+    public ObjectNode parseJsonObject(String s) {
+        JsonNode node;
+        try {
+            node = objectMapper.readTree(s);
+        } catch (IOException e) {
+            throw new MetacatJsonException(e);
+        }
+
+        if (node == null || node.isNull()) {
+            return null;
+        } else if (node.isObject()) {
+            return (ObjectNode) node;
+        } else {
+            throw new MetacatJsonException("Cannot convert '" + s + "' to a json object");
+        }
+    }
+
+    @Override
+    public <T> T parseJsonValue(String s, Class<T> clazz) {
+        try {
+            return objectMapper.readValue(s, clazz);
+        } catch (IOException e) {
+            throw new MetacatJsonException("Unable to convert '" + s + "' into " + clazz, e);
+        }
+    }
+
+    @Override
+    public <T> T parseJsonValue(byte[] s, Class<T> clazz) {
+        try {
+            return objectMapper.readValue(s, clazz);
+        } catch (IOException e) {
+            throw new MetacatJsonException("Unable to convert bytes into " + clazz, e);
+        }
+    }
+
+    private void recursiveMerge(JsonNode primary, JsonNode additional) {
+        if (!primary.isObject()) {
+            throw new MetacatJsonException("This should not be reachable");
+        }
+
+        ObjectNode node = (ObjectNode) primary;
+
+        Iterator<Map.Entry<String, JsonNode>> fields = additional.fields();
+        while (fields.hasNext()) {
+            Map.Entry<String, JsonNode> entry = fields.next();
+            String name = entry.getKey();
+            JsonNode value = entry.getValue();
+
+            // Easiest case, if the primary node doesn't have the current field set the field on the primary
+            if (!node.has(name)) {
+                node.set(name, value);
+            }
+            // If the primary has the field but the incoming value is not an object set the field on the primary
+            else if (!value.isObject()) {
+                node.set(name, value);
+            }
+            // If the primary is currently not an object, just overwrite it with the incoming value
+            else if (!node.get(name).isObject()) {
+                node.set(name, value);
+            }
+            // Otherwise recursively merge the new fields from the incoming object into the primary object
+            else {
+                recursiveMerge(node.get(name), value);
+            }
+        }
+    }
+
+    @Override
+    public void serializeObjectNode(@Nonnull ObjectOutputStream outputStream, @Nullable ObjectNode json)
+            throws IOException {
+        boolean exists = json != null;
+        outputStream.writeBoolean(exists);
+        if (exists) {
+            outputStream.writeUTF(json.toString());
+        }
+    }
+
+    @Override
+    public byte[] toJsonAsBytes(Object o) {
+        try {
+            return objectMapper.writeValueAsBytes(o);
+        } catch (JsonProcessingException e) {
+            throw new MetacatJsonException(e);
+        }
+    }
+
+    @Override
+    public ObjectNode toJsonObject(Object o) {
+        return objectMapper.valueToTree(o);
+    }
+
+    @Override
+    public String toJsonString(Object o) {
+        try {
+            return objectMapper.writeValueAsString(o);
+        } catch (JsonProcessingException e) {
+            throw new MetacatJsonException(e);
+        }
+    }
+}
diff --git a/metacat-common/src/test/groovy/com/netflix/metacat/common/QualifiedNameSpec.groovy b/metacat-common/src/test/groovy/com/netflix/metacat/common/QualifiedNameSpec.groovy
new file mode 100644
index 000000000..f7d0bd66b
--- /dev/null
+++
b/metacat-common/src/test/groovy/com/netflix/metacat/common/QualifiedNameSpec.groovy @@ -0,0 +1,227 @@ +package com.netflix.metacat.common + +import com.fasterxml.jackson.databind.node.ObjectNode +import com.netflix.metacat.common.json.MetacatJson +import com.netflix.metacat.common.json.MetacatJsonLocator +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + + +class QualifiedNameSpec extends Specification { + @Shared + MetacatJson metacatJson = MetacatJsonLocator.INSTANCE + + def 'expect exceptions if missing required params at construction time'() { + when: + new QualifiedName(catalogName, databaseName, tableName, partitionName, null) + + then: + thrown(IllegalStateException) + + where: + catalogName | databaseName | tableName | partitionName + null | null | null | null + '' | null | null | null + ' ' | null | null | null + 'c' | null | 't' | null + 'c' | null | null | 'p' + 'c' | null | 't' | 'p' + 'c' | 'd' | null | 'p' + } + + @Unroll + def "expect an exception trying to parse '#input'"() { + when: + QualifiedName.fromString(input) + + then: + Exception e = thrown() + e instanceof IllegalArgumentException || e instanceof IllegalStateException + + where: + input | _ + '' | _ + ' ' | _ + ' /d ' | _ + ' /d/t ' | _ + ' /d/t/p ' | _ + } + + @Unroll + def 'expect to be able to convert #input into #name'() { + expect: + QualifiedName.fromString(input) == name + + where: + input | name + 'c' | QualifiedName.ofCatalog('c') + 'c/' | QualifiedName.ofCatalog('c') + 'c/d' | QualifiedName.ofDatabase('c', 'd') + ' c / d ' | QualifiedName.ofDatabase('c', 'd') + 'c/ d ' | QualifiedName.ofDatabase('c', 'd') + 'c/ d' | QualifiedName.ofDatabase('c', 'd') + 'c/d/t' | QualifiedName.ofTable('c', 'd', 't') + ' c / d / t ' | QualifiedName.ofTable('c', 'd', 't') + 'c/d/t/p' | QualifiedName.ofPartition('c', 'd', 't', 'p') + ' c / d / t / p ' | QualifiedName.ofPartition('c', 'd', 't', 'p') + 'c/d/t/p/trailing' | QualifiedName.ofPartition('c', 'd', 't', 'p/trailing') + ' c / d / t / p / trailing' | QualifiedName.ofPartition('c', 'd', 't', 'p / trailing') + } + + @Unroll + def 'expect to be able to convert #name into #json'() { + expect: + QualifiedName qualifiedName = QualifiedName.fromString(name) + ObjectNode node = metacatJson.parseJsonObject(json) + ObjectNode jsonQualifiedName = metacatJson.toJsonObject(qualifiedName) + node == jsonQualifiedName + + where: + name | json + 'c' | """{"qualifiedName": "c", "catalogName": "c"}""" + 'c/d' | """{"qualifiedName": "c/d", "catalogName": "c", "databaseName": "d"}""" + 'c/d/t' | """{"qualifiedName": "c/d/t", "catalogName": "c", "databaseName": "d", "tableName": "t"}""" + 'c/d/t/p' | """{"qualifiedName": "c/d/t/p", "catalogName": "c", "databaseName": "d", "tableName": "t", "partitionName": "p"}""" + } + + def 'expect exceptions when requesting parameters that a catalog does not have'() { + given: + def name = QualifiedName.ofCatalog('c') + + expect: + name.isCatalogDefinition() + !name.isDatabaseDefinition() + !name.isTableDefinition() + !name.isPartitionDefinition() + + when: + name.catalogName + + then: + notThrown() + + when: + name.databaseName + + then: + thrown(IllegalStateException) + + when: + name.tableName + + then: + thrown(IllegalStateException) + + when: + name.partitionName + + then: + thrown(IllegalStateException) + } + + def 'expect exceptions when requesting parameters that a database does not have'() { + given: + def name = QualifiedName.ofDatabase('c', 'd') + + expect: + name.isCatalogDefinition() + 
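+        // The contract exercised here and in the blocks below: accessors for parts that are
+        // present at this level return normally, while accessors for absent parts throw
+        // IllegalStateException.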
name.isDatabaseDefinition() + !name.isTableDefinition() + !name.isPartitionDefinition() + + when: + name.catalogName + + then: + notThrown() + + when: + name.databaseName + + then: + notThrown() + + when: + name.tableName + + then: + thrown(IllegalStateException) + + when: + name.partitionName + + then: + thrown(IllegalStateException) + } + + def 'expect exceptions when requesting parameters that a table does not have'() { + given: + def name = QualifiedName.ofTable('c', 'd', 't') + + expect: + name.isCatalogDefinition() + name.isDatabaseDefinition() + name.isTableDefinition() + !name.isPartitionDefinition() + + when: + name.catalogName + + then: + notThrown() + + when: + name.databaseName + + then: + notThrown() + + when: + name.tableName + + then: + notThrown() + + when: + name.partitionName + + then: + thrown(IllegalStateException) + } + + def 'expect exceptions when requesting parameters that a partition does not have'() { + given: + def name = QualifiedName.ofPartition('c', 'd', 't', 'p') + + expect: + name.isCatalogDefinition() + name.isDatabaseDefinition() + name.isTableDefinition() + name.isPartitionDefinition() + + when: + name.catalogName + + then: + notThrown() + + when: + name.databaseName + + then: + notThrown() + + when: + name.tableName + + then: + notThrown() + + when: + name.partitionName + + then: + notThrown() + } +} diff --git a/metacat-common/src/test/groovy/com/netflix/metacat/common/dto/DtoVerificationSpec.groovy b/metacat-common/src/test/groovy/com/netflix/metacat/common/dto/DtoVerificationSpec.groovy new file mode 100644 index 000000000..63f086e35 --- /dev/null +++ b/metacat-common/src/test/groovy/com/netflix/metacat/common/dto/DtoVerificationSpec.groovy @@ -0,0 +1,142 @@ +package com.netflix.metacat.common.dto + +import com.fasterxml.jackson.databind.node.ObjectNode +import com.google.common.reflect.ClassPath +import com.netflix.metacat.common.QualifiedName +import com.netflix.metacat.common.json.MetacatJson +import com.netflix.metacat.common.json.MetacatJsonLocator +import org.apache.commons.lang3.RandomStringUtils +import org.apache.commons.lang3.SerializationUtils +import spock.lang.Specification +import spock.lang.Unroll + +import java.beans.Introspector +import java.beans.PropertyDescriptor +import java.lang.reflect.* + +class DtoVerificationSpec extends Specification { + private static final Random rand = new Random() + private static final ClassPath classPath = ClassPath.from(BaseDto.class.classLoader) + private static final MetacatJson metacatJson = MetacatJsonLocator.INSTANCE + + public static Set> getDtoClasses() { + return classPath.getTopLevelClassesRecursive(BaseDto.class.package.name).collect { ClassPath.ClassInfo info -> + return info.load() + }.findAll { Class theClass -> + int modifiers = theClass.modifiers + + return theClass.name.endsWith('Dto') && + Modifier.isPublic(modifiers) && + !Modifier.isAbstract(modifiers) && + !Modifier.isInterface(modifiers) + } + } + + public static Set> getHasDataMetadataClasses() { + return getDtoClasses().findAll { Class theClass -> + return HasDataMetadata.isAssignableFrom(theClass) + } + } + + public static Set> getHasQualifiedNameClasses() { + return getDtoClasses().findAll { Class theClass -> + return theClass.properties.methods.any { Method method -> + method.returnType == QualifiedName + } + } + } + + public static T getRandomDtoInstance(Class clazz) { + T dto = clazz.newInstance() + + def propertyDescriptors = Introspector.getBeanInfo(clazz).propertyDescriptors + for (PropertyDescriptor descriptor : 
propertyDescriptors) { + Method writeMethod = descriptor.writeMethod + if (writeMethod) { + Class type = descriptor.propertyType + Field field = clazz.declaredFields.find { it.name == descriptor.name } + if (field) { + def randomValue = getRandomValue(type, field) + writeMethod.invoke(dto, randomValue) + } else if (!['dataExternal', 'partition_keys'].contains(descriptor.name)) { + System.out.println("Unable to locate field for descriptor ${descriptor}") + } + } + } + + return dto + } + + public static T getRandomValue(Class clazz) { + return getRandomValue(clazz, null) + } + + @SuppressWarnings("GroovyAssignabilityCheck") + public static T getRandomValue(Class clazz, Field field) { + switch (clazz) { + case String: + return RandomStringUtils.randomAlphabetic(4) + case Boolean: + case Boolean.TYPE: + return rand.nextBoolean() + case Integer: + case Integer.TYPE: + return rand.nextInt() + case Long: + case Long.TYPE: + return rand.nextLong() + case Float: + case Float.TYPE: + return rand.nextFloat() + case Double: + case Double.TYPE: + return rand.nextDouble() + case List: + assert field.genericType instanceof ParameterizedType, "Field ${field} is not generic" + ParameterizedType type = field.genericType as ParameterizedType + return (0..3).collect { + return getRandomValue(Class.forName(type.actualTypeArguments[0].typeName)) + } + case Map: + assert field.genericType instanceof ParameterizedType, "Field ${field} is not generic" + ParameterizedType type = field.genericType as ParameterizedType + def map = [:] + (0..3).each { + Type keyType = type.actualTypeArguments[0] + Type valueType = type.actualTypeArguments[1] + assert keyType.typeName == 'java.lang.String', "Currently only handle string keys" + map.put(RandomStringUtils.randomAlphabetic(4), getRandomValue(Class.forName(valueType.typeName))) + } + return map + case ObjectNode: + ObjectNode node = metacatJson.emptyObjectNode() + node.put(RandomStringUtils.randomAlphabetic(4), RandomStringUtils.randomAlphabetic(4)) + return node + case Date: + return new Date(rand.nextLong()) + case QualifiedName: + return QualifiedName.ofCatalog(RandomStringUtils.randomAlphabetic(4)) + case { clazz.package.name.startsWith(BaseDto.class.package.name) }: + return getRandomDtoInstance(clazz) + default: + throw new IllegalStateException("Unsure how to get a random instance of class: ${clazz}") + } + } + + def 'can get dto classes'() { + expect: + dtoClasses.size() >= 1 + } + + @Unroll + def 'test serialization of #clazz'() { + expect: + assert clazz instanceof Serializable, "Unable to serialize ${clazz}" + Serializable randomInstance1 = getRandomDtoInstance(clazz) as Serializable + + SerializationUtils.roundtrip(randomInstance1).toString() == randomInstance1.toString() + + where: + clazz << dtoClasses + } +} diff --git a/metacat-common/src/test/groovy/com/netflix/metacat/common/json/MetacatJsonSpec.groovy b/metacat-common/src/test/groovy/com/netflix/metacat/common/json/MetacatJsonSpec.groovy new file mode 100644 index 000000000..353a98685 --- /dev/null +++ b/metacat-common/src/test/groovy/com/netflix/metacat/common/json/MetacatJsonSpec.groovy @@ -0,0 +1,74 @@ +package com.netflix.metacat.common.json + +import com.fasterxml.jackson.databind.node.ObjectNode +import com.netflix.metacat.common.dto.DtoVerificationSpec +import spock.lang.Specification +import spock.lang.Unroll + +class MetacatJsonSpec extends Specification { + MetacatJson metacatJson = MetacatJsonLocator.INSTANCE + + @Unroll + def 'can convert #clazz to json and back'() { + given: + def 
randomInstance = DtoVerificationSpec.getRandomDtoInstance(clazz) + + expect: + def jsonString = metacatJson.toJsonString(randomInstance) + assert jsonString + metacatJson.parseJsonValue(jsonString, clazz).toString() == randomInstance.toString() + + where: + clazz << DtoVerificationSpec.dtoClasses + } + + @Unroll + def 'dataExternal should be included with #clazz which implements HasDataMetadata'() { + given: + def randomInstance = DtoVerificationSpec.getRandomDtoInstance(clazz) + + expect: + def objectNode = metacatJson.toJsonObject(randomInstance) + objectNode.path('dataExternal').isBoolean() + + where: + clazz << DtoVerificationSpec.hasDataMetadataClasses + } + + @Unroll + def 'name should be included with #clazz which has a qualified name instance'() { + given: + def randomInstance = DtoVerificationSpec.getRandomDtoInstance(clazz) + + expect: + def objectNode = metacatJson.toJsonObject(randomInstance) + objectNode.path('name').path('qualifiedName').isTextual() + + where: + clazz << DtoVerificationSpec.hasQualifiedNameClasses + } + + @Unroll + def 'when merging #secondaryString into #primaryString expect #expectedString'() { + when: + ObjectNode primary = metacatJson.parseJsonObject(primaryString) + ObjectNode secondary = metacatJson.parseJsonObject(secondaryString) + ObjectNode expected = metacatJson.parseJsonObject(expectedString) + metacatJson.mergeIntoPrimary(primary, secondary) + + then: + primary == expected + + where: + primaryString | secondaryString | expectedString + """{}""" | """{"new_field": "new_value"}""" | """{"new_field": "new_value"}""" + """{"field": "old_value"}""" | """{"field": "new_value"}""" | """{"field": "new_value"}""" + """{"field1": "value1"}""" | """{"field2": "value2"}""" | """{"field1": "value1", "field2": "value2"}""" + """{"field": "old_value"}""" | """{"field": {"scalar_to_object": true}}""" | """{"field": {"scalar_to_object": true}}""" + """{"field": [1,2,3]}""" | """{"field": [4, 5, 6]}""" | """{"field": [4, 5, 6]}""" + """{"field": [1,2,3]}""" | """{"field": {"array_to_object": true}}""" | """{"field": {"array_to_object": true}}""" + """{"field": {"overwrite_nested": false}}""" | """{"field": {"overwrite_nested": true}}""" | """{"field": {"overwrite_nested": true}}""" + """{"field": {"old_nested_field": 1}}""" | """{"field": {"new_nested_field": 2}}""" | """{"field": {"old_nested_field": 1, "new_nested_field": 2}}""" + """{"field": {"old_nested_field": 1}}""" | """{"field": {"new_nested_field": {"new_nested_nested_field": true}}}""" | """{"field":{"old_nested_field":1,"new_nested_field":{"new_nested_nested_field":true}}}""" + } +} diff --git a/metacat-common/src/test/groovy/com/netflix/metacat/common/util/DataProvider.groovy b/metacat-common/src/test/groovy/com/netflix/metacat/common/util/DataProvider.groovy new file mode 100644 index 000000000..cd93130d8 --- /dev/null +++ b/metacat-common/src/test/groovy/com/netflix/metacat/common/util/DataProvider.groovy @@ -0,0 +1,53 @@ +package com.netflix.metacat.common.util + +import com.netflix.metacat.common.dto.PartitionDto +import com.netflix.metacat.common.dto.TableDto +import com.netflix.metacat.common.json.MetacatJson +import com.netflix.metacat.common.json.MetacatJsonLocator + +/** + * Created by amajumdar on 5/15/15. 
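+ * Test helper that builds TableDto and PartitionDto instances by filling the JSON templates under
+ * metacat-common/src/test/resources (e.g. tableTemplate.json) via String.format. Illustrative call:
+ * DataProvider.getTable('prodhive', 'default', 'events', 'amajumdar', null)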
+ */
+class DataProvider {
+    private static final MetacatJson metacatJson = MetacatJsonLocator.INSTANCE
+
+    def static TableDto getTable(String sourceName, String databaseName, String tableName, String owner, String uri){
+        def f = new File('../metacat-common/src/test/resources/tableTemplate.json')
+        if(!f.exists()){
+            f = new File('metacat-common/src/test/resources/tableTemplate.json')
+        }
+        if( uri == null){
+            uri = String.format("file://tmp/hive/warehouse/%s.db/%s", databaseName, tableName);
+        }
+        def tableJson = String.format(f.getText(), sourceName, databaseName, tableName, owner, uri)
+        return metacatJson.parseJsonValue(tableJson, TableDto.class)
+    }
+
+    def static List<TableDto> getTables(String sourceName, String databaseName, String tableName, String owner, String uri, int noOfTables){
+        def result = [] as List<TableDto>
+        for(int i=0;i<noOfTables;i++){
+            result.add(getTable(sourceName, databaseName, tableName + i, owner, uri))
+        }
+        return result
+    }
+
+    def static List<PartitionDto> getPartitions(String sourceName, String databaseName, String tableName, String name, String uri, int noOfPartitions){
+        def result = [] as List<PartitionDto>
+        for(int i=0;i typeConverterProvider) {
+        MapStructPrestoConverters converters = Mappers.getMapper(MapStructPrestoConverters.class);
+        converters.setTypeConverter(typeConverterProvider);
+        return converters;
+    }
+}
diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/HiveConverters.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/HiveConverters.java
new file mode 100644
index 000000000..3518eb845
--- /dev/null
+++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/HiveConverters.java
@@ -0,0 +1,28 @@
+package com.netflix.metacat.converters;
+
+import com.facebook.presto.spi.type.TypeManager;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.DatabaseDto;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.TableDto;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.List;
+
+public interface HiveConverters {
+    TableDto hiveToMetacatTable(QualifiedName name, Table table, TypeManager typeManager);
+
+    Database metacatToHiveDatabase(DatabaseDto databaseDto);
+
+    Table metacatToHiveTable(TableDto dto, TypeManager typeManager);
+
+    PartitionDto hiveToMetacatPartition(TableDto tableDto, Partition partition);
+
+    List<String> getPartValsFromName(TableDto tableDto, String partName);
+
+    String getNameFromPartVals(TableDto tableDto, List<String> partVals);
+
+    Partition metacatToHivePartition(PartitionDto partitionDto, TableDto tableDto);
+}
diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/PrestoConverters.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/PrestoConverters.java
new file mode 100644
index 000000000..633b20351
--- /dev/null
+++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/PrestoConverters.java
@@ -0,0 +1,23 @@
+package com.netflix.metacat.converters;
+
+import com.facebook.presto.metadata.QualifiedTableName;
+import com.facebook.presto.metadata.TableMetadata;
+import com.facebook.presto.spi.ConnectorPartition;
+import com.facebook.presto.spi.type.TypeManager;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.TableDto;
+
+public interface PrestoConverters {
+    TableMetadata fromTableDto(QualifiedName name, TableDto table, TypeManager typeManager);
+
+    QualifiedTableName getQualifiedTableName(QualifiedName name);
+
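+    // The QualifiedName/QualifiedTableName conversions are intended to round-trip; a minimal
+    // sketch (assuming a mapped MapStructPrestoConverters instance named converters):
+    //   QualifiedName name = QualifiedName.ofTable("prodhive", "default", "events");
+    //   QualifiedTableName prestoName = converters.getQualifiedTableName(name);
+    //   assert converters.toQualifiedName(prestoName).equals(name);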
+ PartitionDto toPartitionDto(QualifiedName name, ConnectorPartition partition); + + QualifiedName toQualifiedName(QualifiedTableName qualifiedTableName); + + TableDto toTableDto(QualifiedName name, String type, TableMetadata ptm); + + ConnectorPartition fromPartitionDto(PartitionDto partitionDto); +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/TypeConverter.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/TypeConverter.java new file mode 100644 index 000000000..80afe28b6 --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/TypeConverter.java @@ -0,0 +1,12 @@ +package com.netflix.metacat.converters; + +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; + +/** + * Created by amajumdar on 4/28/15. + */ +public interface TypeConverter { + Type toType(String type, TypeManager typeManager); + String fromType(Type type); +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/TypeConverterProvider.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/TypeConverterProvider.java new file mode 100644 index 000000000..240a9f1d5 --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/TypeConverterProvider.java @@ -0,0 +1,79 @@ +package com.netflix.metacat.converters; + +import com.google.common.base.Throwables; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.server.Config; +import com.netflix.metacat.common.util.MetacatContextManager; +import com.netflix.metacat.converters.impl.HiveTypeConverter; +import com.netflix.metacat.converters.impl.PigTypeConverter; +import com.netflix.metacat.converters.impl.PrestoTypeConverter; + +import javax.inject.Inject; +import javax.inject.Provider; + +import static com.netflix.metacat.common.MetacatContext.DATA_TYPE_CONTEXTS.hive; +import static com.netflix.metacat.common.MetacatContext.DATA_TYPE_CONTEXTS.pig; +import static com.netflix.metacat.common.MetacatContext.DATA_TYPE_CONTEXTS.presto; + +/** + * Created by amajumdar on 10/7/15. 
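+ * Provides the TypeConverter for the current request: the data type context recorded in
+ * MetacatContextManager selects the hive, pig or presto converter, and anything else falls
+ * back to the converter class configured via Config#getDefaultTypeConverter.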
+ */ +public class TypeConverterProvider implements Provider { + @Inject + Config config; + @Inject + HiveTypeConverter hiveTypeConverter; + @Inject + PigTypeConverter pigTypeConverter; + @Inject + PrestoTypeConverter prestoTypeConverter; + + @Override + public TypeConverter get() { + MetacatContext metacatContext = MetacatContextManager.getContext(); + String dataTypeContext = metacatContext.getDataTypeContext(); + if (hive.name().equalsIgnoreCase(dataTypeContext)) { + return hiveTypeConverter; + } else if (pig.name().equalsIgnoreCase(dataTypeContext)) { + return pigTypeConverter; + } else if (presto.name().equalsIgnoreCase(dataTypeContext)) { + return prestoTypeConverter; + } else { + return getDefaultConverter(); + } + } + + public TypeConverter get(MetacatContext.DATA_TYPE_CONTEXTS context) { + switch (context) { + case hive: + return hiveTypeConverter; + case pig: + return pigTypeConverter; + case presto: + return prestoTypeConverter; + default: + throw new IllegalArgumentException("No handler for " + context); + } + } + + public TypeConverter getDefaultConverter() { + try { + return (TypeConverter) Class.forName(config.getDefaultTypeConverter()).newInstance(); + } catch (Exception e) { + throw Throwables.propagate(e); + } + } + + public MetacatContext.DATA_TYPE_CONTEXTS getDefaultConverterType() { + TypeConverter converter = getDefaultConverter(); + if (converter instanceof HiveTypeConverter) { + return hive; + } else if (converter instanceof PigTypeConverter) { + return pig; + } else if (converter instanceof PrestoTypeConverter) { + return presto; + } else { + throw new IllegalStateException("Unknown handler: " + converter); + } + } +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/DateConverters.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/DateConverters.java new file mode 100644 index 000000000..f7dccc16a --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/DateConverters.java @@ -0,0 +1,34 @@ +package com.netflix.metacat.converters.impl; + +import com.netflix.metacat.common.server.Config; + +import javax.inject.Inject; +import java.time.Instant; +import java.util.Date; + +public class DateConverters { + private static Config config; + + @Inject + public static void setConfig(Config config) { + DateConverters.config = config; + } + + public Long fromDateToLong(Date d) { + if (d == null) { + return null; + } + + Instant instant = d.toInstant(); + return config.isEpochInSeconds() ? instant.getEpochSecond() : instant.toEpochMilli(); + } + + public Date fromLongToDate(Long l) { + if (l == null) { + return null; + } + + Instant instant = config.isEpochInSeconds() ? 
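+        // Mirrors fromDateToLong above; e.g. with isEpochInSeconds() the instant for
+        // 2015-05-15T00:00:00Z round-trips as 1431648000 rather than 1431648000000.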
Instant.ofEpochSecond(l) : Instant.ofEpochMilli(l); + return Date.from(instant); + } +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/HiveTypeConverter.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/HiveTypeConverter.java new file mode 100644 index 000000000..1899057c5 --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/HiveTypeConverter.java @@ -0,0 +1,220 @@ +package com.netflix.metacat.converters.impl; + +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.type.DateType; +import com.facebook.presto.spi.type.StandardTypes; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.facebook.presto.spi.type.TypeSignature; +import com.facebook.presto.type.MapType; +import com.facebook.presto.type.RowType; +import com.google.common.collect.ImmutableList; +import com.netflix.metacat.converters.TypeConverter; +import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static com.facebook.presto.hive.util.Types.checkType; +import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED; +import static com.facebook.presto.spi.type.BigintType.BIGINT; +import static com.facebook.presto.spi.type.BooleanType.BOOLEAN; +import static com.facebook.presto.spi.type.DateType.DATE; +import static com.facebook.presto.spi.type.DoubleType.DOUBLE; +import static com.facebook.presto.spi.type.TimestampType.TIMESTAMP; +import static com.facebook.presto.spi.type.VarbinaryType.VARBINARY; +import static com.facebook.presto.spi.type.VarcharType.VARCHAR; +import static com.facebook.presto.type.FloatType.FLOAT; +import static com.facebook.presto.type.IntType.INT; +import static org.apache.hadoop.hive.serde.serdeConstants.BIGINT_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.BINARY_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.BOOLEAN_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.DATE_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.DOUBLE_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.FLOAT_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.INT_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME; +import static org.apache.hadoop.hive.serde.serdeConstants.TIMESTAMP_TYPE_NAME; + +public class HiveTypeConverter implements TypeConverter { + private static Type getPrimitiveType(PrimitiveObjectInspector.PrimitiveCategory primitiveCategory) { + switch (primitiveCategory) { + case BOOLEAN: + return BOOLEAN; + case BYTE: + case SHORT: + case INT: + return INT; + case LONG: + return BIGINT; + case FLOAT: + return FLOAT; + case DOUBLE: + case 
DECIMAL: + return DOUBLE; + case CHAR: + case STRING: + case VARCHAR: + return VARCHAR; + case DATE: + return DATE; + case TIMESTAMP: + return TIMESTAMP; + case BINARY: + case VOID: + return VARBINARY; + default: + return null; + } + } + + public static Type getType(ObjectInspector fieldInspector, TypeManager typeManager) { + switch (fieldInspector.getCategory()) { + case PRIMITIVE: + PrimitiveObjectInspector.PrimitiveCategory primitiveCategory = ((PrimitiveObjectInspector) fieldInspector).getPrimitiveCategory(); + return getPrimitiveType(primitiveCategory); + case MAP: + MapObjectInspector mapObjectInspector = checkType(fieldInspector, MapObjectInspector.class, + "fieldInspector"); + Type keyType = getType(mapObjectInspector.getMapKeyObjectInspector(), typeManager); + Type valueType = getType(mapObjectInspector.getMapValueObjectInspector(), typeManager); + if (keyType == null || valueType == null) { + return null; + } + return typeManager.getParameterizedType(StandardTypes.MAP, + ImmutableList.of(keyType.getTypeSignature(), valueType.getTypeSignature()), ImmutableList.of()); + case LIST: + ListObjectInspector listObjectInspector = checkType(fieldInspector, ListObjectInspector.class, + "fieldInspector"); + Type elementType = getType(listObjectInspector.getListElementObjectInspector(), typeManager); + if (elementType == null) { + return null; + } + return typeManager.getParameterizedType(StandardTypes.ARRAY, + ImmutableList.of(elementType.getTypeSignature()), ImmutableList.of()); + case STRUCT: + StructObjectInspector structObjectInspector = checkType(fieldInspector, StructObjectInspector.class, + "fieldInspector"); + List fieldTypes = new ArrayList<>(); + List fieldNames = new ArrayList<>(); + for (StructField field : structObjectInspector.getAllStructFieldRefs()) { + fieldNames.add(field.getFieldName()); + Type fieldType = getType(field.getFieldObjectInspector(), typeManager); + if (fieldType == null) { + return null; + } + fieldTypes.add(fieldType.getTypeSignature()); + } + return typeManager.getParameterizedType(StandardTypes.ROW, fieldTypes, fieldNames); + default: + throw new IllegalArgumentException("Unsupported hive type " + fieldInspector.getTypeName()); + } + } + + @Override + public String fromType(Type type) { + if (BOOLEAN.equals(type)) { + return BOOLEAN_TYPE_NAME; + } else if (INT.equals(type)) { + return INT_TYPE_NAME; + } else if (BIGINT.equals(type)) { + return BIGINT_TYPE_NAME; + } else if (FLOAT.equals(type)) { + return FLOAT_TYPE_NAME; + } else if (DOUBLE.equals(type)) { + return DOUBLE_TYPE_NAME; + } else if (VARCHAR.equals(type)) { + return STRING_TYPE_NAME; + } else if (VARBINARY.equals(type)) { + return BINARY_TYPE_NAME; + } else if (DateType.DATE.equals(type)) { + return DATE_TYPE_NAME; + } else if (TIMESTAMP.equals(type)) { + return TIMESTAMP_TYPE_NAME; + } else if (type.getTypeSignature().getBase().equals(StandardTypes.MAP)) { + MapType mapType = (MapType) type; + return "map<" + fromType(mapType.getKeyType()) + "," + fromType(mapType.getValueType()) + ">"; + } else if (type.getTypeSignature().getBase().equals(StandardTypes.ARRAY)) { + String typeString = type.getTypeParameters().stream().map(this::fromType).collect(Collectors.joining(",")); + return "array<" + typeString + ">"; + } else if (type.getTypeSignature().getBase().equals(StandardTypes.ROW)) { + RowType rowType = (RowType) type; + String typeString = rowType.getFields() + .stream() + .map(this::rowFieldToString) + .collect(Collectors.joining(",")); + return "struct<" + typeString + ">"; + } else { + 
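+            // Any other type is unsupported. For reference, the branches above render Hive-style
+            // signatures, e.g. "map<string,bigint>", "array<double>" and "struct<id:bigint,name:string>".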
throw new PrestoException(NOT_SUPPORTED, "unsupported type: " + type); + } + } + + private String rowFieldToString(RowType.RowField rowField) { + String prefix = ""; + if (rowField.getName().isPresent()) { + prefix = rowField.getName().get() + ":"; + } + + return prefix + fromType(rowField.getType()); + } + + @Override + public Type toType(String type, TypeManager typeManager) { + // Hack to fix presto "varchar" type coming in with no length which is required by Hive. + if ("varchar".equals(type)) { + type = STRING_TYPE_NAME; + } + TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(type); + ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(typeInfo); + // The standard struct object inspector forces field names to lower case, however in Metacat we need to preserve + // the original case of the struct fields so we wrap it with our wrapper to force the fieldNames to keep + // their original case + if (typeInfo.getCategory().equals(ObjectInspector.Category.STRUCT)) { + StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo; + StandardStructObjectInspector objectInspector = (StandardStructObjectInspector) oi; + oi = new SameCaseStandardStructObjectInspector(structTypeInfo.getAllStructFieldNames(), objectInspector); + } + return getType(oi, typeManager); + } + + // This is protected and extends StandardStructObjectInspector so it can reference MyField + protected static class SameCaseStandardStructObjectInspector extends StandardStructObjectInspector { + private final List realFieldNames; + private final StandardStructObjectInspector structObjectInspector; + + public SameCaseStandardStructObjectInspector(List realFieldNames, + StandardStructObjectInspector structObjectInspector) { + this.realFieldNames = realFieldNames; + this.structObjectInspector = structObjectInspector; + } + + @Override + public List getAllStructFieldRefs() { + return structObjectInspector.getAllStructFieldRefs() + .stream() + .map(structField -> (MyField) structField) + .map(field -> new SameCaseMyField(field.getFieldID(), realFieldNames.get(field.getFieldID()), + field.getFieldObjectInspector(), field.getFieldComment())) + .collect(Collectors.toList()); + } + + protected static class SameCaseMyField extends MyField { + public SameCaseMyField(int fieldID, String fieldName, ObjectInspector fieldObjectInspector, + String fieldComment) { + super(fieldID, fieldName, fieldObjectInspector, fieldComment); + // Since super lower cases fieldName, this is to restore the original case + this.fieldName = fieldName; + } + } + } +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/MapStructHiveConverters.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/MapStructHiveConverters.java new file mode 100644 index 000000000..a3e0866c0 --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/MapStructHiveConverters.java @@ -0,0 +1,394 @@ +package com.netflix.metacat.converters.impl; + +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.AuditDto; +import com.netflix.metacat.common.dto.DatabaseDto; +import com.netflix.metacat.common.dto.FieldDto; +import com.netflix.metacat.common.dto.PartitionDto; +import 
com.netflix.metacat.common.dto.StorageDto; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.converters.HiveConverters; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.mapstruct.Mapper; +import org.mapstruct.ReportingPolicy; + +import java.time.Instant; +import java.util.Collections; +import java.util.Date; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.EMPTY_MAP; + +@Mapper(uses = DateConverters.class, + unmappedTargetPolicy = ReportingPolicy.ERROR, + componentModel = "default") +public abstract class MapStructHiveConverters implements HiveConverters { + private static final Splitter SLASH_SPLITTER = Splitter.on('/'); + private static final Splitter EQUAL_SPLITTER = Splitter.on('=').limit(2); + + @VisibleForTesting + Integer dateToEpochSeconds(Date date) { + if (date == null) { + return null; + } + + Instant instant = date.toInstant(); + long seconds = instant.getEpochSecond(); + if (seconds <= Integer.MAX_VALUE) { + return (int) seconds; + } + + throw new IllegalStateException("Unable to convert date " + date + " to an integer seconds value"); + } + + private Date epochSecondsToDate(long seconds) { + Instant instant = Instant.ofEpochSecond(seconds); + return Date.from(instant); + } + + private FieldDto hiveToMetacatField(TypeManager typeManager, FieldSchema field, boolean isPartitionKey) { + FieldDto dto = new FieldDto(); + dto.setName(field.getName()); + dto.setType(field.getType()); + dto.setSource_type(field.getType()); + dto.setComment(field.getComment()); + dto.setPartition_key(isPartitionKey); + + return dto; + } + + private FieldSchema metacatToHiveField(FieldDto fieldDto) { + FieldSchema result = new FieldSchema(); + result.setName(fieldDto.getName()); + result.setType(fieldDto.getType()); + result.setComment(fieldDto.getComment()); + return result; + } + + @Override + public TableDto hiveToMetacatTable(QualifiedName name, Table table, TypeManager typeManager) { + TableDto dto = new TableDto(); + dto.setSerde(toStorageDto(table.getSd(), table.getOwner())); + dto.setAudit(new AuditDto()); + dto.setName(name); + if (table.isSetCreateTime()) { + dto.getAudit().setCreatedDate(epochSecondsToDate(table.getCreateTime())); + } + dto.setMetadata(table.getParameters()); + + List nonPartitionColumns = table.getSd().getCols(); + List partitionColumns = table.getPartitionKeys(); + List allFields = Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size()); + nonPartitionColumns.stream() + .map(field -> this.hiveToMetacatField(typeManager, field, false)) + .forEachOrdered(allFields::add); + partitionColumns.stream() + .map(field -> this.hiveToMetacatField(typeManager, field, true)) + .forEachOrdered(allFields::add); + dto.setFields(allFields); + + return dto; + } + + @Override + @SuppressWarnings("unchecked") + public Database metacatToHiveDatabase(DatabaseDto dto) { + Database database = new Database(); + + String name = ""; + String description = ""; + QualifiedName databaseName = dto.getName(); + if (databaseName != null) 
{ + name = databaseName.getDatabaseName(); + // Since this is required setting it to the same as the DB name for now + description = databaseName.getDatabaseName(); + } + database.setName(name); + database.setDescription(description); + + String dbUri = dto.getUri(); + if (Strings.isNullOrEmpty(dbUri)) { + dbUri = ""; + } + database.setLocationUri(dbUri); + + Map metadata = dto.getMetadata(); + if (metadata == null) { + metadata = EMPTY_MAP; + } + database.setParameters(metadata); + + return database; + } + + @Override + public Table metacatToHiveTable(TableDto dto, TypeManager typeManager) { + Table table = new Table(); + String tableName = ""; + String databaseName = ""; + + QualifiedName name = dto.getName(); + if (name != null) { + tableName = name.getTableName(); + databaseName = name.getDatabaseName(); + } + table.setTableName(tableName); + table.setDbName(databaseName); + + StorageDto storageDto = dto.getSerde(); + String owner = ""; + if (storageDto != null && storageDto.getOwner() != null) { + owner = storageDto.getOwner(); + } + table.setOwner(owner); + + AuditDto auditDto = dto.getAudit(); + if (auditDto != null && auditDto.getCreatedDate() != null) { + table.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate())); + } + + Map params = Collections.emptyMap(); + if (dto.getMetadata() != null) { + params = dto.getMetadata(); + } + table.setParameters(params); + + // TODO get this + table.setTableType("EXTERNAL_TABLE"); + + table.setSd(fromStorageDto(storageDto)); + StorageDescriptor sd = table.getSd(); + + List fields = dto.getFields(); + if (fields == null) { + table.setPartitionKeys(Collections.emptyList()); + sd.setCols(Collections.emptyList()); + } else { + List nonPartitionFields = Lists.newArrayListWithCapacity(fields.size()); + List partitionFields = Lists.newArrayListWithCapacity(fields.size()); + for (FieldDto fieldDto : fields) { + FieldSchema f = metacatToHiveField(fieldDto); + + if (fieldDto.isPartition_key()) { + partitionFields.add(f); + } else { + nonPartitionFields.add(f); + } + } + table.setPartitionKeys(partitionFields); + sd.setCols(nonPartitionFields); + } + + return table; + } + + private StorageDto toStorageDto(StorageDescriptor sd, String owner) { + StorageDto result = new StorageDto(); + if (sd != null) { + result.setOwner(owner); + result.setUri(sd.getLocation()); + result.setInputFormat(sd.getInputFormat()); + result.setOutputFormat(sd.getOutputFormat()); + result.setParameters(sd.getParameters()); + SerDeInfo serde = sd.getSerdeInfo(); + if (serde != null) { + result.setSerializationLib(serde.getSerializationLib()); + result.setSerdeInfoParameters(serde.getParameters()); + } + } + return result; + } + + private StorageDescriptor fromStorageDto(StorageDto storageDto) { + // Set all required fields to a non-null value + StorageDescriptor result = new StorageDescriptor(); + String inputFormat = ""; + String location = ""; + String outputFormat = ""; + String serdeName = ""; + String serializationLib = ""; + Map sdParams = EMPTY_MAP; + Map serdeParams = EMPTY_MAP; + + if (storageDto != null) { + if (storageDto.getInputFormat() != null) { + inputFormat = storageDto.getInputFormat(); + } + if (storageDto.getUri() != null) { + location = storageDto.getUri(); + } + if (storageDto.getOutputFormat() != null) { + outputFormat = storageDto.getOutputFormat(); + } + if (storageDto.getSerializationLib() != null) { + serializationLib = storageDto.getSerializationLib(); + } + if (storageDto.getParameters() != null) { + sdParams = storageDto.getParameters(); + } + if 
(storageDto.getSerdeInfoParameters() != null) { + serdeParams = storageDto.getSerdeInfoParameters(); + } + } + + result.setInputFormat(inputFormat); + result.setLocation(location); + result.setOutputFormat(outputFormat); + result.setSerdeInfo(new SerDeInfo(serdeName, serializationLib, serdeParams)); + result.setCols(Collections.emptyList()); + result.setBucketCols(Collections.emptyList()); + result.setSortCols(Collections.emptyList()); + result.setParameters(sdParams); + return result; + } + + @Override + public PartitionDto hiveToMetacatPartition(TableDto tableDto, Partition partition) { + QualifiedName tableName = tableDto.getName(); + QualifiedName partitionName = QualifiedName.ofPartition(tableName.getCatalogName(), tableName.getDatabaseName(), + tableName.getTableName(), getNameFromPartVals(tableDto, partition.getValues())); + + PartitionDto result = new PartitionDto(); + String owner = ""; + if (tableDto.getSerde() != null) { + owner = tableDto.getSerde().getOwner(); + } + result.setSerde(toStorageDto(partition.getSd(), owner)); + result.setMetadata(partition.getParameters()); + + AuditDto auditDto = new AuditDto(); + auditDto.setCreatedDate(epochSecondsToDate(partition.getCreateTime())); + auditDto.setLastModifiedDate(epochSecondsToDate(partition.getLastAccessTime())); + result.setAudit(auditDto); + result.setName(partitionName); + return result; + } + + @Override + public List getPartValsFromName(TableDto tableDto, String partName) { + // Unescape the partition name + + LinkedHashMap hm = null; + try { + hm = Warehouse.makeSpecFromName(partName); + } catch (MetaException e) { + throw new IllegalArgumentException("Invalid partition name", e); + } + + List partVals = Lists.newArrayList(); + for (String key : tableDto.getPartition_keys()) { + String val = hm.get(key); + if (val == null) { + throw new IllegalArgumentException("Invalid partition name - missing " + key); + } + partVals.add(val); + } + return partVals; + } + + @Override + public String getNameFromPartVals(TableDto tableDto, List partVals) { + List partitionKeys = tableDto.getPartition_keys(); + if (partitionKeys.size() != partVals.size()) { + throw new IllegalArgumentException("Not the same number of partition columns and partition values"); + } + + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < partitionKeys.size(); i++) { + if (builder.length() > 0) { + builder.append('/'); + } + + builder.append(partitionKeys.get(i)); + builder.append('='); + builder.append(partVals.get(i)); + } + return builder.toString(); + } + + @Override + public Partition metacatToHivePartition(PartitionDto partitionDto, TableDto tableDto) { + Partition result = new Partition(); + + QualifiedName name = partitionDto.getName(); + List values = Lists.newArrayListWithCapacity(16); + String databaseName = ""; + String tableName = ""; + if (name != null) { + if (name.getPartitionName() != null) { + for (String partialPartName : SLASH_SPLITTER.split(partitionDto.getName().getPartitionName())) { + List nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName)); + if (nameValues.size() != 2) { + throw new IllegalStateException("Unrecognized partition name: " + partitionDto.getName()); + } + String value = nameValues.get(1); + values.add(value); + } + } + + if (name.getDatabaseName() != null) { + databaseName = name.getDatabaseName(); + } + + if (name.getTableName() != null) { + tableName = name.getTableName(); + } + } + result.setValues(values); + result.setDbName(databaseName); + result.setTableName(tableName); + + Map 
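+        // For reference, partition names use Hive's key=value form joined by '/': with partition
+        // keys (dateint, hour), getNameFromPartVals(table, ["20150101", "5"]) yields
+        // "dateint=20150101/hour=5", and getPartValsFromName inverts it (illustrative values).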
metadata = partitionDto.getMetadata(); + if (metadata == null) { + metadata = EMPTY_MAP; + } + result.setParameters(metadata); + + result.setSd(fromStorageDto(partitionDto.getSerde())); + StorageDescriptor sd = result.getSd(); + if (tableDto != null) { + if (sd.getSerdeInfo() != null && tableDto.getSerde() != null && Strings.isNullOrEmpty( + sd.getSerdeInfo().getSerializationLib())) { + sd.getSerdeInfo().setSerializationLib(tableDto.getSerde().getSerializationLib()); + } + + List fields = tableDto.getFields(); + if (fields == null) { + sd.setCols(Collections.emptyList()); + } else { + sd.setCols(fields.stream() + .filter(field -> !field.isPartition_key()) + .map(this::metacatToHiveField) + .collect(Collectors.toList())); + } + } + + AuditDto auditDto = partitionDto.getAudit(); + if (auditDto != null) { + if (auditDto.getCreatedDate() != null) { + result.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate())); + } + if (auditDto.getLastModifiedDate() != null) { + result.setLastAccessTime(dateToEpochSeconds(auditDto.getLastModifiedDate())); + } + } + + return result; + } +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/MapStructPrestoConverters.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/MapStructPrestoConverters.java new file mode 100644 index 000000000..26ce172e1 --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/MapStructPrestoConverters.java @@ -0,0 +1,258 @@ +package com.netflix.metacat.converters.impl; + +import com.facebook.presto.metadata.QualifiedTableName; +import com.facebook.presto.metadata.TableMetadata; +import com.facebook.presto.spi.AuditInfo; +import com.facebook.presto.spi.ColumnDetailMetadata; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionDetail; +import com.facebook.presto.spi.ConnectorPartitionDetailImpl; +import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.TupleDomain; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.collect.Lists; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.AuditDto; +import com.netflix.metacat.common.dto.FieldDto; +import com.netflix.metacat.common.dto.PartitionDto; +import com.netflix.metacat.common.dto.StorageDto; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.converters.PrestoConverters; +import com.netflix.metacat.converters.TypeConverter; +import org.mapstruct.InheritInverseConfiguration; +import org.mapstruct.Mapper; +import org.mapstruct.Mapping; +import org.mapstruct.ReportingPolicy; + +import javax.inject.Provider; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static com.google.common.base.Preconditions.checkNotNull; + +@Mapper(uses = DateConverters.class, + unmappedTargetPolicy = ReportingPolicy.ERROR, + componentModel = "default") +public abstract class MapStructPrestoConverters implements PrestoConverters { + Provider typeConverter; + public void setTypeConverter(Provider typeConverter){ + this.typeConverter = typeConverter; + } + + private List columnsFromTable(TableDto table, TypeManager typeManager) { + if (table.getFields() == null) { + return 
Lists.newArrayList(); + } + + return table.getFields().stream() + .map(fieldDto -> fromFieldDto(fieldDto, typeManager)) + .collect(Collectors.toList()); + } + + @InheritInverseConfiguration + protected abstract AuditInfo fromAuditDto(AuditDto audit); + + protected ColumnMetadata fromFieldDto(FieldDto fieldDto, TypeManager typeManager) { + String type = fieldDto.getType(); + if( type == null){ + type = fieldDto.getSource_type(); + } + return new ColumnMetadata( + fieldDto.getName(), + typeConverter.get().toType(type, typeManager), + fieldDto.isPartition_key(), + fieldDto.getComment(), + false + ); + } + + @InheritInverseConfiguration + protected abstract StorageInfo fromStorageDto(StorageDto serde); + + @Override + public TableMetadata fromTableDto(QualifiedName name, TableDto table, TypeManager typeManager) { + checkNotNull(name, "name is null"); + checkNotNull(table, "table is null"); + checkNotNull(typeManager, "typeManager is null"); + + return new TableMetadata(name.getCatalogName(), fromTableDto(table, typeManager)); + } + + protected ConnectorTableMetadata fromTableDto(TableDto table, TypeManager typeManager) { + return new ConnectorTableDetailMetadata( + toSchemaTableName(table.getName()), + columnsFromTable(table, typeManager), + table.getSerde()==null?null:table.getSerde().getOwner(), + fromStorageDto(table.getSerde()), + table.getMetadata(), + fromAuditDto(table.getAudit()) + ); + } + + @Override + public QualifiedTableName getQualifiedTableName(QualifiedName name) { + return new QualifiedTableName(name.getCatalogName(), name.getDatabaseName(), name.getTableName()); + } + + protected AuditDto toAuditDto(ConnectorTableMetadata connectorTableMetadata) { + if (connectorTableMetadata != null && connectorTableMetadata instanceof ConnectorTableDetailMetadata) { + return toAuditDto(((ConnectorTableDetailMetadata) connectorTableMetadata).getAuditInfo()); + } + + return null; + } + + protected AuditDto toAuditDto(ConnectorPartition connectorPartition) { + if (connectorPartition != null && connectorPartition instanceof ConnectorPartitionDetail) { + return toAuditDto(((ConnectorPartitionDetail) connectorPartition).getAuditInfo()); + } + + return null; + } + + @Mapping(target = "createdBy", source = "createdBy") + @Mapping(target = "createdDate", source = "createdDate") + @Mapping(target = "lastModifiedBy", source = "lastUpdatedBy") + @Mapping(target = "lastModifiedDate", source = "lastUpdatedDate") + protected abstract AuditDto toAuditDto(AuditInfo auditInfo); + + @Mapping(target = "name", source = "name") + @Mapping(target = "type", source = "type") + @Mapping(target = "partition_key", source = "partitionKey") + @Mapping(target = "comment", source = "comment") + @Mapping(target = "source_type", ignore = true) + @Mapping(target = "pos", ignore = true) + @Mapping(target = "isNullable", ignore = true) + @Mapping(target = "size", ignore = true) + @Mapping(target = "defaultValue", ignore = true) + @Mapping(target = "isSortKey", ignore = true) + @Mapping(target = "isIndexKey", ignore = true) + protected abstract FieldDto toFieldDto(ColumnMetadata column); + + protected List toFieldDtos(List columns){ + List result = Lists.newArrayList(); + if( columns != null ){ + for(int i=0; i < columns.size(); i++){ + ColumnMetadata column = columns.get(i); + FieldDto fieldDto = toFieldDto(column); + if( column instanceof ColumnDetailMetadata){ + ColumnDetailMetadata columnDetail = (ColumnDetailMetadata) column; + fieldDto.setSource_type(columnDetail.getSourceType()); + 
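+                    // nullability, size, default value and sort/index flags only exist on
+                    // ColumnDetailMetadata, so they are copied here and left unset for
+                    // plain ColumnMetadata columns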
fieldDto.setIsNullable(columnDetail.getIsNullable()); + fieldDto.setSize(columnDetail.getSize()); + fieldDto.setDefaultValue(columnDetail.getDefaultValue()); + fieldDto.setIsIndexKey(columnDetail.getIsIndexKey()); + fieldDto.setIsSortKey(columnDetail.getIsSortKey()); + } + fieldDto.setPos(i); + result.add(fieldDto); + } + } + return result; + } + + protected Map toMetadata(ConnectorTableMetadata metadata) { + if (metadata != null && metadata instanceof ConnectorTableDetailMetadata) { + ConnectorTableDetailMetadata detailMetadata = (ConnectorTableDetailMetadata) metadata; + return detailMetadata.getMetadata(); + } + + return null; + } + + protected Map toMetadata(ConnectorPartition partition) { + if (partition != null && partition instanceof ConnectorPartitionDetail) { + ConnectorPartitionDetail partitionDetail = (ConnectorPartitionDetail) partition; + return partitionDetail.getMetadata(); + } + + return null; + } + + @Mapping(target = "name", source = "name") + @Mapping(target = "audit", source = "partition") + @Mapping(target = "serde", source = "partition") + @Mapping(target = "metadata", source = "partition") + @Mapping(target = "dataExternal", ignore = true) + @Mapping(target = "dataMetadata", ignore = true) + @Mapping(target = "definitionMetadata", ignore = true) + @Override + public abstract PartitionDto toPartitionDto(QualifiedName name, ConnectorPartition partition); + + protected List toPartitionKeys(List columns) { + return columns.stream() + .filter(ColumnMetadata::isPartitionKey) + .map(ColumnMetadata::getName) + .collect(Collectors.toList()); + } + + @Override + public QualifiedName toQualifiedName(QualifiedTableName qualifiedTableName) { + return QualifiedName.ofTable(qualifiedTableName.getCatalogName(), qualifiedTableName.getSchemaName(), + qualifiedTableName.getTableName()); + } + + public SchemaTableName toSchemaTableName(QualifiedName name) { + return new SchemaTableName(name.getDatabaseName(), name.getTableName()); + } + + @Mapping(target = "owner", ignore = true) + @Mapping(target = "parameters", source = "parameters") + @Mapping(target = "serdeInfoParameters", source = "serdeInfoParameters") + @Mapping(target = "serializationLib", source = "serializationLib") + @Mapping(target = "inputFormat", source = "inputFormat") + @Mapping(target = "outputFormat", source = "outputFormat") + @Mapping(target = "uri", source = "uri") + protected abstract StorageDto toStorageDto(StorageInfo storageInfo); + + protected StorageDto toStorageDto(ConnectorTableMetadata connectorTableMetadata) { + if (connectorTableMetadata != null && connectorTableMetadata instanceof ConnectorTableDetailMetadata) { + ConnectorTableDetailMetadata detailMetadata = (ConnectorTableDetailMetadata) connectorTableMetadata; + StorageDto storageDto = toStorageDto(detailMetadata.getStorageInfo()); + storageDto.setOwner(detailMetadata.getOwner()); + return storageDto; + } + + return null; + } + + protected StorageDto toStorageDto(ConnectorPartition connectorPartition) { + if (connectorPartition != null && connectorPartition instanceof ConnectorPartitionDetail) { + ConnectorPartitionDetail detailMetadata = (ConnectorPartitionDetail) connectorPartition; + StorageDto storageDto = toStorageDto(detailMetadata.getStorageInfo()); + if(detailMetadata.getAuditInfo() != null) { + storageDto.setOwner(detailMetadata.getAuditInfo().getCreatedBy()); + } + return storageDto; + } + + return null; + } + + protected String toString(Type type) { + return typeConverter.get().fromType(type); + } + + @Mapping(target = "name", source = 
"name") + @Mapping(target = "metadata", source = "ptm.metadata") + @Mapping(target = "dataExternal", ignore = true) + @Mapping(target = "dataMetadata", ignore = true) + @Mapping(target = "definitionMetadata", ignore = true) + @Mapping(target = "audit", source = "ptm.metadata") + @Mapping(target = "partition_keys", source = "ptm.columns") + @Mapping(target = "serde", source = "ptm.metadata") + @Mapping(target = "fields", source = "ptm.columns") + @Override + public abstract TableDto toTableDto(QualifiedName name, String type, TableMetadata ptm); + + @Override + public ConnectorPartition fromPartitionDto(PartitionDto partitionDto) { + return new ConnectorPartitionDetailImpl(partitionDto.getName().getPartitionName(), TupleDomain.none(), fromStorageDto(partitionDto.getSerde()), partitionDto.getMetadata(), fromAuditDto(partitionDto.getAudit())); + } +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/PigTypeConverter.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/PigTypeConverter.java new file mode 100644 index 000000000..f51864e2a --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/PigTypeConverter.java @@ -0,0 +1,184 @@ +package com.netflix.metacat.converters.impl; + +import com.facebook.presto.spi.type.BigintType; +import com.facebook.presto.spi.type.BooleanType; +import com.facebook.presto.spi.type.DateType; +import com.facebook.presto.spi.type.DoubleType; +import com.facebook.presto.spi.type.TimestampType; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.facebook.presto.spi.type.VarbinaryType; +import com.facebook.presto.spi.type.VarcharType; +import com.facebook.presto.type.ArrayType; +import com.facebook.presto.type.FloatType; +import com.facebook.presto.type.IntType; +import com.facebook.presto.type.MapType; +import com.facebook.presto.type.RowType; +import com.facebook.presto.type.UnknownType; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.netflix.metacat.converters.TypeConverter; +import org.apache.pig.data.DataType; +import org.apache.pig.impl.logicalLayer.FrontendException; +import org.apache.pig.impl.logicalLayer.schema.Schema; +import org.apache.pig.impl.util.Utils; +import org.apache.pig.newplan.logical.Util; +import org.apache.pig.newplan.logical.relational.LogicalSchema; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Optional; + +import static java.lang.String.format; + +/** + * Created by amajumdar on 4/27/15. 
+ */ +public class PigTypeConverter implements TypeConverter{ + private static final Logger log = LoggerFactory.getLogger(PigTypeConverter.class); + private static final String NAME_ARRAY_ELEMENT = "array_element"; + public Type toType(String pigType, TypeManager typeManager){ + try { + LogicalSchema schema = Utils.parseSchema(pigType); + LogicalSchema.LogicalFieldSchema field = schema.getField(0); + return toPrestoType( field); + } catch (Exception e) { + log.warn("Pig Parsing failed for signature {}", pigType, e); + throw new IllegalArgumentException(format("Bad type signature: '%s'", pigType)); + } + } + + public String fromType(Type prestoType){ + Schema schema = new Schema(Util.translateFieldSchema(fromPrestoTypeToPigSchema(null, prestoType))); + StringBuilder result = new StringBuilder(); + try { + Schema.stringifySchema( result, schema, DataType.GENERIC_WRITABLECOMPARABLE, Integer.MIN_VALUE); + } catch (FrontendException e) { + throw new IllegalArgumentException(format("Bad presto type: '%s'", prestoType)); + } + return result.toString(); + } + + private LogicalSchema.LogicalFieldSchema fromPrestoTypeToPigSchema(String alias, Type prestoType){ + if( VarbinaryType.VARBINARY.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.BYTEARRAY); + } else if ( BooleanType.BOOLEAN.equals( prestoType)) { + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.BOOLEAN); + } else if( IntType.INT.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.INTEGER); + } else if( BigintType.BIGINT.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.LONG); + } else if( FloatType.FLOAT.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.FLOAT); + } else if( DoubleType.DOUBLE.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.DOUBLE); + } else if( TimestampType.TIMESTAMP.equals(prestoType) || DateType.DATE.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.DATETIME); + } else if( VarcharType.VARCHAR.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.CHARARRAY); + } else if( UnknownType.UNKNOWN.equals(prestoType)){ + return new LogicalSchema.LogicalFieldSchema(alias, null, DataType.UNKNOWN); + } else if( prestoType instanceof MapType){ + MapType mapType = (MapType) prestoType; + LogicalSchema schema = null; + // + // map in Presto is map[] in PIG + // + if( mapType.getValueType() != null && !UnknownType.UNKNOWN.equals(mapType.getValueType())){ + schema = new LogicalSchema(); + schema.addField(fromPrestoTypeToPigSchema(null, mapType.getValueType())); + } + return new LogicalSchema.LogicalFieldSchema(alias, schema, DataType.MAP); + } else if( prestoType instanceof ArrayType){ + ArrayType arrayType = (ArrayType) prestoType; + LogicalSchema schema = new LogicalSchema(); + Type elementType = arrayType.getElementType(); + if( elementType != null){ + if( !(elementType instanceof RowType)){ + elementType = new RowType(Lists.newArrayList(elementType), Optional.of(ImmutableList.of(NAME_ARRAY_ELEMENT))); + } + schema.addField(fromPrestoTypeToPigSchema(null, elementType)); + } + return new LogicalSchema.LogicalFieldSchema(alias, schema, DataType.BAG); + + } else if( prestoType instanceof RowType){ + RowType rowType = (RowType) prestoType; + LogicalSchema schema = new LogicalSchema(); + rowType.getFields().stream().forEach( + rowField -> 
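+                            // each Presto row field becomes a named field of the Pig tuple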
schema.addField(fromPrestoTypeToPigSchema(rowField.getName().isPresent()?rowField.getName().get():null, rowField.getType()))); + return new LogicalSchema.LogicalFieldSchema(alias, schema, DataType.TUPLE); + } + throw new IllegalArgumentException(format("Bad presto type: '%s'", prestoType)); + } + + private Type toPrestoType(LogicalSchema.LogicalFieldSchema field){ + switch(field.type){ + case DataType.BOOLEAN: + return BooleanType.BOOLEAN; + case DataType.BYTE: + case DataType.BYTEARRAY: + return VarbinaryType.VARBINARY; + case DataType.INTEGER: + return IntType.INT; + case DataType.LONG: + case DataType.BIGINTEGER: + return BigintType.BIGINT; + case DataType.FLOAT: + return FloatType.FLOAT; + case DataType.DOUBLE: + case DataType.BIGDECIMAL: + return DoubleType.DOUBLE; + case DataType.DATETIME: + return TimestampType.TIMESTAMP; + case DataType.CHARARRAY: + case DataType.BIGCHARARRAY: + return VarcharType.VARCHAR; + case DataType.MAP: + return toPrestoMapType(field); + case DataType.BAG: + return toPrestoArrayType(field); + case DataType.TUPLE: + return toPrestoRowType( field); + case DataType.UNKNOWN: + return UnknownType.UNKNOWN; + } + throw new IllegalArgumentException(format("Bad type signature: '%s'", field.toString())); + } + + private Type toPrestoRowType(LogicalSchema.LogicalFieldSchema field) { + List fieldTypes = Lists.newArrayList(); + List fieldNames = Lists.newArrayList(); + field.schema.getFields().stream().forEach(logicalFieldSchema -> { + fieldTypes.add(toPrestoType( logicalFieldSchema)); + fieldNames.add(logicalFieldSchema.alias); + }); + return new RowType(fieldTypes, Optional.of(fieldNames)); + } + + private Type toPrestoArrayType(LogicalSchema.LogicalFieldSchema field) { + LogicalSchema.LogicalFieldSchema subField = field.schema.getField(0); + Type elementType = null; + if( subField.type == DataType.TUPLE + && subField.schema.getFields() != null + && !subField.schema.getFields().isEmpty() + && NAME_ARRAY_ELEMENT.equals(subField.schema.getFields().get(0).alias)){ + elementType = toPrestoType(subField.schema.getFields().get(0)); + } else { + elementType = toPrestoType(subField); + } + return new ArrayType(elementType); + } + + private Type toPrestoMapType(LogicalSchema.LogicalFieldSchema field) { + Type key = VarcharType.VARCHAR; + Type value = UnknownType.UNKNOWN; + if( field.schema != null){ + List fields = field.schema.getFields(); + if(fields.size() > 0){ + value = toPrestoType(fields.get(0)); + } + } + return new MapType(key, value); + } +} diff --git a/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/PrestoTypeConverter.java b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/PrestoTypeConverter.java new file mode 100644 index 000000000..592b3a63a --- /dev/null +++ b/metacat-converters/src/main/java/com/netflix/metacat/converters/impl/PrestoTypeConverter.java @@ -0,0 +1,32 @@ +package com.netflix.metacat.converters.impl; + +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.facebook.presto.spi.type.TypeSignature; +import com.netflix.metacat.converters.TypeConverter; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 4/28/15. 
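+ * <p>
+ * Parses Presto type signature strings through the supplied TypeManager. A minimal sketch
+ * (the signature string and TypeRegistry are illustrative, mirroring the tests):
+ * <pre>
+ *     Type type = new PrestoTypeConverter().toType("varchar", new TypeRegistry());
+ *     String signature = new PrestoTypeConverter().fromType(type); // "varchar"
+ * </pre>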
+ */ +public class PrestoTypeConverter implements TypeConverter { + @Override + public Type toType(String type, TypeManager typeManager) { + TypeSignature signature = typeSignatureFromString(type); + return checkNotNull(typeFromTypeSignature(signature, typeManager), "Invalid type %s", type); + } + + @Override + public String fromType(Type type) { + return type.getDisplayName(); + } + + Type typeFromTypeSignature(TypeSignature typeSignature, TypeManager typeManager) { + return typeManager.getType(typeSignature); + } + + TypeSignature typeSignatureFromString(String s) { + return TypeSignature.parseTypeSignature(s); + } +} diff --git a/metacat-converters/src/test/groovy/com/netflix/metacat/converters/HiveTypeConverterSpec.groovy b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/HiveTypeConverterSpec.groovy new file mode 100644 index 000000000..441f6c1b4 --- /dev/null +++ b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/HiveTypeConverterSpec.groovy @@ -0,0 +1,67 @@ +package com.netflix.metacat.converters + +import com.facebook.presto.spi.type.TypeManager +import com.facebook.presto.type.TypeRegistry +import com.netflix.metacat.converters.impl.HiveTypeConverter +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +class HiveTypeConverterSpec extends Specification { + @Shared + HiveTypeConverter converter = new HiveTypeConverter() + @Shared + TypeManager typeManager = new TypeRegistry() + + @Unroll + def 'can convert "#typeString" to a presto type and back'(String typeString) { + expect: + def prestoType = converter.toType(typeString, typeManager) + def hiveType = converter.fromType(prestoType) + prestoType == converter.toType(hiveType, typeManager) + where: + typeString << [ + 'tinyint', + 'smallint', + 'int', + 'bigint', + 'float', + 'double', + 'decimal', + 'decimal(4,2)', + 'timestamp', + 'date', + 'string', + 'varchar(10)', + 'char(10)', + 'boolean', + 'binary', + 'array', + 'array', + 'array', + 'array', + 'array', + 'array>', + 'array>', + 'array>', + 'array>', + 'array>', + 'array>', + 'array>', + 'array>', + 'map', + 'map', + 'map', + 'map', + 'map', + 'map', + 'map>>', + 'struct', + 'struct', + 'struct', + 'struct', + 'struct', + 'struct', + ] + } +} diff --git a/metacat-converters/src/test/groovy/com/netflix/metacat/converters/PigTypeConverterSpec.groovy b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/PigTypeConverterSpec.groovy new file mode 100644 index 000000000..3fa3a80ef --- /dev/null +++ b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/PigTypeConverterSpec.groovy @@ -0,0 +1,54 @@ +package com.netflix.metacat.converters + +import com.facebook.presto.spi.type.TypeManager +import com.facebook.presto.type.TypeRegistry +import com.netflix.metacat.converters.impl.PigTypeConverter +import spock.lang.Shared +import spock.lang.Specification + +class PigTypeConverterSpec extends Specification { + @Shared + PigTypeConverter converter = new PigTypeConverter() + @Shared + TypeManager typeManager = new TypeRegistry() + + def "parse type signatures"(String typeString) { + expect: + def prestoType = converter.toType(typeString, typeManager) + def pigType = converter.fromType(prestoType) + prestoType == converter.toType(pigType, typeManager) + where: + typeString << [ + "{t:(name:chararray)}", + "map[]", + "bytearray", + "long", + "int", + "chararray", + "{(start_date:int, end_date:int)}", + "Map[chararray]", + "{t: (client_request_id: chararray,event_utc_ms: long)}", + "{t: 
(play_timestamp: long,play_duration: int,play_title_id: int, play_request_id:chararray)}", + "{t: (credit_request_id: chararray)}", + "{t: (credit_timestamp: long)}", + "{(account_id: int,signup_date: int,cancel_date: int,cancel_reason: chararray,voluntary_cancel: int,billing_nbr: int,device: chararray,isp: chararray,sessions: long,playsessions: long,totalhours: double,playhours: double,genre: chararray,network: chararray,errorrate: long,startplayabortrate: long,rebufferrate: double,stallrate: double,changerate: double,downswitchrate: double,average_video_rate: int,startplay_video_rate: int,initial235share: long,all_starts_video_rate: int,playdelay: int,rebufferplaydelay: int,seekplaydelay: int,earlyrebufferrate: double,laterebufferrate: double,timetoquality: int,subsequentvideorate: int,initial30videorate: int,playdelay10: int,playdelay30: int,playdelay50: int,playdelay70: int,playdelay90: int)}", + //"string", + //"bag", + "{(title_id: int)}", + "double", + //"boolean", + "float", + //"Integer", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (credit_request_id: {t: (credit_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float , play_rank:int)})}, join_type:chararray)}", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (client_request_id: {t: (client_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float, play_rank:int)})}, join_type:chararray)}", + "{(show_title_id:int, presentation_rank_number:int, join_type:chararray, play_duration_mins:double, play_count:int, play_start_time:int, runtime_minutes:double, is_tv:int, pvr_rank:int, is_novel:int, is_title_played:int)}", + //"map", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (credit_request_id: {t: (credit_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float , play_rank:int)})}, join_type:chararray,interleaving:chararray)}", + "{(show_title_id: int,location_id: int,vhs_bag: {t: (view_utc_sec: long,view_duration: int,title_id: int, play_request_id: chararray, runtime_minutes:float, play_rank:int)})}", + "map[{t:(client_request_id: chararray,event_utc_ms: long)}]", + "{rows: (track_id: int, sub_root_uuid: chararray, list_type: chararray, item_type: chararray, hasevidence: chararray, listContext: chararray, genre_id: int, evidence: map[], presentation_row_number: int, mmid: {t: (show_title_id: int, source_title_id: int, evidence: chararray, evidenceType: chararray, context: chararray, interleaving: chararray)}, diversity_score: int,random_group: chararray,is_fallback: chararray,expected_reward_score: chararray, track_ids: {track_id: (track_id: int)})}", + "{(show_title_id: int,location_id: int,vhs_bag: {t: (view_utc_sec: long,view_duration: int,title_id: int, 
play_request_id: chararray, runtime_minutes:float, play_row: int)})}", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (credit_request_id: {t: (credit_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float , play_rank:int)})}, join_type:chararray, interleaving: chararray)}", + "{(presentation_rank_number: int, show_title_id: int)}"] + } +} diff --git a/metacat-converters/src/test/groovy/com/netflix/metacat/converters/TypeSignatureSpec.groovy b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/TypeSignatureSpec.groovy new file mode 100644 index 000000000..d013e21d1 --- /dev/null +++ b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/TypeSignatureSpec.groovy @@ -0,0 +1,72 @@ +package com.netflix.metacat.converters + +import spock.lang.Ignore +import spock.lang.Specification + +import java.util.regex.Matcher +import java.util.regex.Pattern + +class TypeSignatureSpec extends Specification { + + def "parse type signatures"(String typeString) { + expect: + def prestoTypeString = toPrestoTypeString(typeString) + + where: + typeString << [ + "{t:(name:chararray)}", + "map[]", + "bytearray", + "long", + "int", + "chararray", + "{(start_date:int, end_date:int)}", + "Map[chararray]", + "{t: (client_request_id: chararray,event_utc_ms: long)}", + "{t: (play_timestamp: long,play_duration: int,play_title_id: int, play_request_id:chararray)}", + "{t: (credit_request_id: chararray)}", + "{t: (credit_timestamp: long)}", + "{(account_id: int,signup_date: int,cancel_date: int,cancel_reason: chararray,voluntary_cancel: int,billing_nbr: int,device: chararray,isp: chararray,sessions: long,playsessions: long,totalhours: double,playhours: double,genre: chararray,network: chararray,errorrate: long,startplayabortrate: long,rebufferrate: double,stallrate: double,changerate: double,downswitchrate: double,average_video_rate: int,startplay_video_rate: int,initial235share: long,all_starts_video_rate: int,playdelay: int,rebufferplaydelay: int,seekplaydelay: int,earlyrebufferrate: double,laterebufferrate: double,timetoquality: int,subsequentvideorate: int,initial30videorate: int,playdelay10: int,playdelay30: int,playdelay50: int,playdelay70: int,playdelay90: int)}", + "string", + "bag", + "{(title_id: int)}", + "double", + "boolean", + "float", + "Integer", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (credit_request_id: {t: (credit_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float , play_rank:int)})}, join_type:chararray)}", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (client_request_id: {t: (client_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float, play_rank:int)})}, join_type:chararray)}", + 
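+                // same real-world Pig schema corpus as PigTypeConverterSpec, mixing nested
+                // bags/tuples, maps and scalar aliases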
"{(show_title_id:int, presentation_rank_number:int, join_type:chararray, play_duration_mins:double, play_count:int, play_start_time:int, runtime_minutes:double, is_tv:int, pvr_rank:int, is_novel:int, is_title_played:int)}", + "map", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (credit_request_id: {t: (credit_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float , play_rank:int)})}, join_type:chararray,interleaving:chararray)}", + "{(show_title_id: int,location_id: int,vhs_bag: {t: (view_utc_sec: long,view_duration: int,title_id: int, play_request_id: chararray, runtime_minutes:float, play_rank:int)})}", + "map[{t:(client_request_id: chararray,event_utc_ms: long)}]", + "{rows: (track_id: int, sub_root_uuid: chararray, list_type: chararray, item_type: chararray, hasevidence: chararray, listContext: chararray, genre_id: int, evidence: map[], presentation_row_number: int, mmid: {t: (show_title_id: int, source_title_id: int, evidence: chararray, evidenceType: chararray, context: chararray, interleaving: chararray)}, diversity_score: int,random_group: chararray,is_fallback: chararray,expected_reward_score: chararray, track_ids: {track_id: (track_id: int)})}", + "{(show_title_id: int,location_id: int,vhs_bag: {t: (view_utc_sec: long,view_duration: int,title_id: int, play_request_id: chararray, runtime_minutes:float, play_row: int)})}", + "{(show_title_id: int,source_title_id: int,title_evidence: chararray,title_evidence_type: chararray,row_context: chararray,presentation_rank_number: int,play_count: int,play_duration: int,vhs_bag: {t: (credit_request_id: {t: (credit_request_id: chararray)},credit_timestamp: {t: (credit_timestamp: long)},titles: {t: (play_timestamp: long,play_duration: int,play_title_id: int,play_request_id: chararray,runtime_minutes: float , play_rank:int)})}, join_type:chararray, interleaving: chararray)}", + "{(presentation_rank_number: int, show_title_id: int)}"] + } + + @Ignore + def toPrestoTypeString(String typeString){ + def tokens = ['}':'>','[':'<',']':'>','#':',',':':' ','{':'array<','(':'row\\(',' ':'']; + def keys = ['}',Pattern.quote('['),']','#',':',' ',Pattern.quote('{')+'[a-zA-Z_0-9_\\s]*[:]?',Pattern.quote('(')] + //def tokenKeys = [Pattern.quote('{'),'t:','}','\\[',']',Pattern.quote('('),'#',':'] + // Create pattern of the format "%(cat|beverage)%" + String patternString = "(" + keys.join("|") + ")"; + Pattern pattern = Pattern.compile(patternString); + Matcher matcher = pattern.matcher(typeString); + + StringBuffer sb = new StringBuffer(); + while(matcher.find()) { + def match = matcher.group(1) + def replace = tokens.get(match) + if( !replace && match.startsWith('{')){ + replace = tokens.get('{') + } + matcher.appendReplacement(sb, replace); + } + matcher.appendTail(sb); + return sb.toString() + } +} diff --git a/metacat-converters/src/test/groovy/com/netflix/metacat/converters/impl/MapStructHiveConvertersSpec.groovy b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/impl/MapStructHiveConvertersSpec.groovy new file mode 100644 index 000000000..5ccb8ec96 --- /dev/null +++ b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/impl/MapStructHiveConvertersSpec.groovy @@ -0,0 +1,478 @@ +package com.netflix.metacat.converters.impl + +import 
com.facebook.presto.spi.type.TypeManager +import com.facebook.presto.spi.type.VarcharType +import com.netflix.metacat.common.QualifiedName +import com.netflix.metacat.common.dto.AuditDto +import com.netflix.metacat.common.dto.DatabaseDto +import com.netflix.metacat.common.dto.FieldDto +import com.netflix.metacat.common.dto.PartitionDto +import com.netflix.metacat.common.dto.StorageDto +import com.netflix.metacat.common.dto.TableDto +import com.netflix.metacat.common.server.Config +import org.apache.hadoop.hive.metastore.api.FieldSchema +import org.apache.hadoop.hive.metastore.api.Partition +import org.apache.hadoop.hive.metastore.api.SerDeInfo +import org.apache.hadoop.hive.metastore.api.StorageDescriptor +import org.apache.hadoop.hive.metastore.api.Table +import org.joda.time.Instant +import org.mapstruct.factory.Mappers +import spock.lang.Specification + +import java.time.LocalDateTime +import java.time.ZoneId +import java.time.ZoneOffset + +class MapStructHiveConvertersSpec extends Specification { + private static final ZoneOffset PACIFIC = LocalDateTime.now().atZone(ZoneId.of('America/Los_Angeles')).offset + Config config = Mock(Config) + TypeManager typeManager = Mock(TypeManager) + MapStructHiveConverters converter + HiveTypeConverter hiveTypeConverter = Mock(HiveTypeConverter) + + def setup() { + // Stub this to always return true + config.isEpochInSeconds() >> true + DateConverters.setConfig(config) + converter = Mappers.getMapper(MapStructHiveConverters) + } + + def 'test date to epoch seconds'() { + when: + def result = converter.dateToEpochSeconds(input) + + then: + result == output + + where: + input | output + null | null + Instant.parse('2016-02-25T14:47:27').toDate() | Instant.parse('2016-02-25T14:47:27').getMillis() / 1000 + } + + def 'test date to epoch seconds failure'() { + when: + converter.dateToEpochSeconds(input) + + then: + thrown(IllegalStateException) + + where: + input = Date.from(LocalDateTime.of(9999, 2, 25, 14, 47, 27).toInstant(PACIFIC)) + } + + def 'test metacatToHiveDatabase sets all required fields to non-null values'() { + given: + def dto = new DatabaseDto() + + when: + def db = converter.metacatToHiveDatabase(dto) + + then: + db + db.name != null + db.description != null + db.locationUri != null + db.parameters != null + } + + def 'test metacatToHiveDatabase'() { + given: + def dto = new DatabaseDto( + name: QualifiedName.ofDatabase('catalog', databaseName), + uri: dbUri, + metadata: metadata + ) + + when: + def db = converter.metacatToHiveDatabase(dto) + + then: + db + db.name == databaseName + db.description == databaseName + db.locationUri == dbUri + db.parameters == metadata + + where: + databaseName = 'db' + dbUri = 'http://example.com' + metadata = [k1: 'v1', k2: 'v2'] + } + + def 'test metacatToHiveTable sets all required fields to non-null values'() { + given: + TableDto dto = new TableDto() + + when: + def table = converter.metacatToHiveTable(dto, typeManager) + + then: + table + table.tableName != null + table.dbName != null + table.owner != null + table.sd != null + table.sd.cols != null + table.sd.location != null + table.sd.inputFormat != null + table.sd.outputFormat != null + table.sd.serdeInfo != null + table.sd.serdeInfo.name != null + table.sd.serdeInfo.serializationLib != null + table.sd.serdeInfo.parameters != null + table.sd.bucketCols != null + table.sd.sortCols != null + table.sd.parameters != null + table.partitionKeys != null + table.parameters != null + table.tableType != null + } + + def 'test metacatToHiveTable'() { + when: + 
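+        // convert a fully populated TableDto (built in the where: block below) into a Hive Table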
def table = converter.metacatToHiveTable(dto, typeManager) + + then: + table.dbName == databaseName + table.tableName == tableName + table.owner == owner + table.createTime == Instant.parse('2016-02-25T14:47:27').getMillis() / 1000 + table.sd.cols.size() == fields.size() - 2 + table.sd.cols.each { + it.name.startsWith('field_') + it.comment.startsWith('comment_') + it.type == 'VARCHAR' + } + table.sd.location == location + table.sd.inputFormat == inputFormat + table.sd.outputFormat == outputFormat + table.sd.serdeInfo.name == '' + table.sd.serdeInfo.serializationLib == serializationLib + table.sd.serdeInfo.parameters == serdeInfoParams + table.sd.parameters == storageParams + table.partitionKeys.size() == 2 + table.partitionKeys.each { + it.name.startsWith('field_') + it.comment.startsWith('comment_') + it.type == 'VARCHAR' + } + table.parameters == tableParams + table.tableType == 'EXTERNAL_TABLE' + + where: + databaseName = 'database' + tableName = 'table' + owner = 'owner' + createDate = Instant.parse('2016-02-25T14:47:27').toDate() + location = 'location' + inputFormat = 'inputFormat' + outputFormat = 'outputFormat' + serializationLib = 'serializationLib' + storageParams = ['sk1': 'sv1'] + serdeInfoParams = ['sik1': 'siv1'] + fields = (0..9).collect { + new FieldDto(name: "field_$it", partition_key: it < 2, comment: "comment_$it", type: 'VARCHAR') + } + tableParams = ['tk1': 'tv1'] + dto = new TableDto( + name: QualifiedName.ofTable('catalog', databaseName, tableName), + audit: new AuditDto(createdDate: createDate), + serde: new StorageDto( + owner: owner, + uri: location, + inputFormat: inputFormat, + outputFormat: outputFormat, + serializationLib: serializationLib, + parameters: storageParams, + serdeInfoParameters: serdeInfoParams + ), + fields: fields, + metadata: tableParams + ) + } + + def 'test hiveToMetacatTable'() { + when: + def dto = converter.hiveToMetacatTable(name, table, typeManager) + + then: + hiveTypeConverter.toType('VARCHAR', typeManager) >> VarcharType.VARCHAR + dto.name == name + dto.name.databaseName == databaseName + dto.name.tableName == tableName + dto.audit.createdDate == Instant.parse('2016-02-25T14:47:27').toDate() + dto.serde.owner == owner + dto.serde.uri == location + dto.serde.inputFormat == inputFormat + dto.serde.outputFormat == outputFormat + dto.serde.serializationLib == serializationLib + dto.serde.serdeInfoParameters == serdeInfoParams + dto.serde.parameters == sdParams + dto.fields.size() == columns.size() + partitonKeys.size() + dto.fields.each { + it.name.startsWith('field_') + it.comment.startsWith('comment_') + it.type == 'VARCHAR' + } + dto.fields.findAll { it.partition_key }.size() == 2 + dto.metadata == tableParams + + where: + databaseName = 'database' + tableName = 'table' + name = QualifiedName.ofTable('catalog', databaseName, tableName) + owner = 'owner' + createDate = Instant.parse('2016-02-25T14:47:27').toDate() + location = 'location' + inputFormat = 'inputFormat' + outputFormat = 'outputFormat' + serializationLib = 'serializationLib' + serdeInfoParams = ['sipk1': 'sipv1'] + sdParams = ['sdk1': 'sdv1'] + columns = (0..7).collect { new FieldSchema(name: "field_$it", comment: "comment_$it", type: 'VARCHAR') } + partitonKeys = (0..1).collect { new FieldSchema(name: "field_$it", comment: "comment_$it", type: 'VARCHAR') } + tableParams = ['tk1': 'tv1'] + table = new Table( + dbName: databaseName, + tableName: tableName, + owner: owner, + createTime: Instant.parse('2016-02-25T14:47:27').getMillis() / 1000, + sd: new StorageDescriptor( + 
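+                // Hive-side fixture; the assertions above expect each of these values on the DTO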
cols: columns, + location: location, + inputFormat: inputFormat, + outputFormat: outputFormat, + serdeInfo: new SerDeInfo( + name: 'serdeName', + serializationLib: serializationLib, + parameters: serdeInfoParams, + ), + parameters: sdParams + ), + partitionKeys: partitonKeys, + parameters: tableParams, + ) + } + + def 'test metacatToHivePartition sets all required fields to non-null values'() { + given: + TableDto tableDto = new TableDto() + PartitionDto dto = new PartitionDto() + + when: + def partition = converter.metacatToHivePartition(dto, tableDto) + + then: + partition + partition.values != null + partition.tableName != null + partition.dbName != null + partition.parameters != null + partition.sd != null + partition.sd.cols != null + partition.sd.location != null + partition.sd.inputFormat != null + partition.sd.outputFormat != null + partition.sd.serdeInfo != null + partition.sd.serdeInfo.name != null + partition.sd.serdeInfo.serializationLib != null + partition.sd.serdeInfo.parameters != null + partition.sd.bucketCols != null + partition.sd.sortCols != null + partition.sd.parameters != null + } + + def 'test metacatToHivePartition'() { + when: + def partition = converter.metacatToHivePartition(dto, tableDto) + + then: + partition.values == ['CAPS', 'lower', '3'] + partition.tableName == tableName + partition.dbName == databaseName + partition.createTime == Instant.parse('2016-02-25T14:47:27').getMillis() / 1000 + partition.lastAccessTime == Instant.parse('2016-02-25T14:47:27').getMillis() / 1000 + partition.sd.cols.size() == fields.size() - 2 + partition.sd.cols.each { + it.name.startsWith('field_') + it.comment.startsWith('comment_') + it.type == 'VARCHAR' + } + partition.sd.location == location + partition.sd.inputFormat == inputFormat + partition.sd.outputFormat == outputFormat + partition.sd.serdeInfo.name == '' + partition.sd.serdeInfo.serializationLib == serializationLib + partition.sd.serdeInfo.parameters == serdeInfoParams + partition.sd.parameters == sdParams + partition.parameters == partitionParams + + where: + databaseName = 'database' + tableName = 'table' + partitionName = 'key1=CAPS/key2=lower/key3=3' + owner = 'owner' + createDate = Instant.parse('2016-02-25T14:47:27').toDate() + location = 'location' + inputFormat = 'inputFormat' + outputFormat = 'outputFormat' + serializationLib = 'serializationLib' + sdParams = ['sdk1': 'sdv1'] + serdeInfoParams = ['sipk1': 'sipv1'] + fields = (0..9).collect { + new FieldDto(name: "field_$it", partition_key: it < 2, comment: "comment_$it", type: 'VARCHAR') + } + partitionParams = ['tk1': 'tv1'] + dto = new PartitionDto( + name: QualifiedName.ofPartition('catalog', databaseName, tableName, partitionName), + audit: new AuditDto(createdDate: createDate, lastModifiedDate: createDate), + serde: new StorageDto( + owner: owner, + uri: location, + inputFormat: inputFormat, + outputFormat: outputFormat, + serializationLib: serializationLib, + parameters: sdParams, + serdeInfoParameters: serdeInfoParams, + ), + metadata: partitionParams, + ) + tableDto = new TableDto( + fields: fields + ) + } + + def 'test metacatToHivePartition can handle a partition name with multiple equals'() { + when: + def partition = converter.metacatToHivePartition(dto, tableDto) + + then: + partition.values == ['weird=true', '', 'monk'] + + where: + dto = new PartitionDto( + name: QualifiedName.ofPartition('c', 'd', 't', 'this=weird=true/bob=/someone=monk') + ) + tableDto = new TableDto() + } + + def 'test metacatToHivePartition throws an error on invalid 
partition name'() { + when: + converter.metacatToHivePartition(dto, tableDto) + + then: + thrown(IllegalStateException) + + where: + dto = new PartitionDto( + name: QualifiedName.ofPartition('c', 'd', 't', 'fail') + ) + tableDto = new TableDto() + } + + def 'test metacatToHivePartition copies serialization lib from the table if there is not one on the partition'() { + when: + def partition = converter.metacatToHivePartition(dto, tableDto) + + then: + partition.sd.serdeInfo.serializationLib == serializationLib + + where: + serializationLib = 'serializationLib' + dto = new PartitionDto() + tableDto = new TableDto( + serde: new StorageDto( + serializationLib: serializationLib + ) + ) + } + + def 'test hiveToMetacatPartition'() { + when: + def dto = converter.hiveToMetacatPartition(tableDto, partition) + + then: + dto.name == QualifiedName.ofPartition('catalog', databaseName, tableName, 'key1=CAPS/key2=lower/key3=3') + dto.name.databaseName == databaseName + dto.name.tableName == tableName + dto.audit.createdDate == Instant.parse('2016-02-25T14:47:27').toDate() + dto.serde.owner == owner + dto.serde.uri == location + dto.serde.inputFormat == inputFormat + dto.serde.outputFormat == outputFormat + dto.serde.serializationLib == serializationLib + dto.serde.serdeInfoParameters == serdeParams + dto.serde.parameters == sdParams + dto.metadata == partitionParams + + where: + databaseName = 'database' + tableName = 'table' + owner = 'owner' + createDate = Instant.parse('2016-02-25T14:47:27').toDate() + location = 'location' + inputFormat = 'inputFormat' + outputFormat = 'outputFormat' + serializationLib = 'serializationLib' + serdeParams = ['k1': 'v1'] + sdParams = ['sdk1': 'sdv1'] + partitionParams = ['tk1': 'tv1'] + partition = new Partition( + dbName: databaseName, + tableName: tableName, + createTime: Instant.parse('2016-02-25T14:47:27').getMillis() / 1000, + values: ['CAPS', 'lower', '3'], + sd: new StorageDescriptor( + cols: [new FieldSchema('col1', 'VARCHAR', 'comment_1')], + location: location, + inputFormat: inputFormat, + outputFormat: outputFormat, + serdeInfo: new SerDeInfo( + name: 'serdeName', + serializationLib: serializationLib, + parameters: serdeParams, + ), + parameters: sdParams + ), + parameters: partitionParams, + ) + tableDto = new TableDto( + name: QualifiedName.ofTable('catalog', databaseName, tableName), + fields: [ + new FieldDto(name: 'key1', partition_key: true, comment: 'comment_1', type: 'VARCHAR'), + new FieldDto(name: 'key2', partition_key: true, comment: 'comment_2', type: 'VARCHAR'), + new FieldDto(name: 'key3', partition_key: true, comment: 'comment_3', type: 'VARCHAR'), + new FieldDto(name: 'col1', partition_key: false, comment: 'comment_1', type: 'VARCHAR'), + ], + serde: new StorageDto( + owner: owner + ), + ) + } + + def 'test hiveToMetacatPartition fails if wrong number of partition values'() { + given: + def partition = new Partition( + values: partitionValues + ) + def tableDto = new TableDto( + name: QualifiedName.ofTable('c', 'd', 't'), + fields: [ + new FieldDto(name: 'key1', partition_key: true, comment: 'comment_1', type: 'VARCHAR'), + new FieldDto(name: 'key2', partition_key: true, comment: 'comment_2', type: 'VARCHAR'), + new FieldDto(name: 'key3', partition_key: true, comment: 'comment_3', type: 'VARCHAR'), + new FieldDto(name: 'col1', partition_key: false, comment: 'comment_1', type: 'VARCHAR'), + ] + ) + + when: + converter.hiveToMetacatPartition(tableDto, partition) + + then: + thrown(IllegalArgumentException) + + where: + partitionValues << [[], 
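+            // tableDto declares three partition keys, so 0, 1, 2 or 4 values must all fail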
['too few'], ['still', 'too few'], ['too', 'many', 'values', 'present']] + } +} diff --git a/metacat-converters/src/test/groovy/com/netflix/metacat/converters/impl/PrestoConvertersSpec.groovy b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/impl/PrestoConvertersSpec.groovy new file mode 100644 index 000000000..adf0206da --- /dev/null +++ b/metacat-converters/src/test/groovy/com/netflix/metacat/converters/impl/PrestoConvertersSpec.groovy @@ -0,0 +1,258 @@ +package com.netflix.metacat.converters.impl + +import com.facebook.presto.metadata.QualifiedTableName +import com.facebook.presto.metadata.TableMetadata +import com.facebook.presto.spi.* +import com.facebook.presto.spi.type.BigintType +import com.facebook.presto.spi.type.TypeManager +import com.facebook.presto.spi.type.VarcharType +import com.facebook.presto.type.TypeRegistry +import com.google.inject.Provider +import com.netflix.metacat.common.QualifiedName +import com.netflix.metacat.common.dto.* +import com.netflix.metacat.common.server.Config +import com.netflix.metacat.converters.TypeConverter +import de.danielbechler.diff.ObjectDiffer +import de.danielbechler.diff.ObjectDifferBuilder +import de.danielbechler.diff.node.DiffNode +import de.danielbechler.diff.node.Visit +import org.mapstruct.factory.Mappers +import org.slf4j.Logger +import org.slf4j.LoggerFactory +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.LocalDateTime +import java.time.Month +import java.time.ZoneId +import java.time.ZoneOffset + +class PrestoConvertersSpec extends Specification { + private static final Logger log = LoggerFactory.getLogger(PrestoConvertersSpec) + private static final ZoneOffset PACIFIC = LocalDateTime.now().atZone(ZoneId.of('America/Los_Angeles')).offset + + @Shared + TypeManager typeManager = new TypeRegistry() + @Shared + MapStructPrestoConverters converter + @Shared + Config config = Mock(Config) + + def setupSpec(){ + // Stub this to always return true + config.isEpochInSeconds() >> true + DateConverters.setConfig(config) + converter = Mappers.getMapper(MapStructPrestoConverters.class) + converter.setTypeConverter( new Provider() { + @Override + TypeConverter get() { + return new PrestoTypeConverter() + } + }) + } + + boolean objectsSame(def actual, def expected) { + ObjectDifferBuilder builder = ObjectDifferBuilder.startBuilding() + builder.inclusion() + .exclude().propertyNameOfType(TableDto, 'dataUri').also() + .exclude().propertyNameOfType(PartitionDto, 'dataUri').also() + .exclude().propertyNameOfType(QualifiedName, 'databaseName', 'tableName', 'partitionName') + + ObjectDiffer differ = builder.build() + DiffNode diff = differ.compare(actual, expected) + if (diff.hasChanges()) { + diff.visit({ DiffNode node, Visit visit -> + log.info("DIFFERENCE {} => {}", node.getPath(), node.getState()) + } as DiffNode.Visitor) + } + return !diff.hasChanges() + } + + TableDto getDummyTableDto(int i) { + if (i == 0) { + return new TableDto( + audit: new AuditDto( + createdBy: 'dwatson', + createdDate: Date.from(LocalDateTime.of(2015, Month.APRIL, 8, 4, 33, 47).toInstant(PACIFIC)), + lastModifiedBy: 'amajumdar', + lastModifiedDate: Date.from(LocalDateTime.of(2015, Month.APRIL, 8, 4, 51, 12).toInstant(PACIFIC)), + ), + dataMetadata: null, + definitionMetadata: null, + fields: [ + new FieldDto( + comment: null, + name: 'c1', + partition_key: true, + pos: 0, + source_type: null, + type: 'bigint' + ), + new FieldDto( + comment: null, + name: 'c2', + partition_key: true, + pos: 1, + 
source_type: null, + type: 'varchar' + ) + ], + metadata: ['mdc1': 'mdv1'], + name: QualifiedName.fromString('catalog/db/table'), + serde: new StorageDto( + inputFormat: 'siIF', + outputFormat: 'siOF', + owner: 'owner', + parameters: ['sik1': 'siv1'], + serdeInfoParameters: ['sipk1': 'sipv1'], + serializationLib: 'siSL', + uri: 'siUri' + ) + ) + } + } + + TableMetadata getDummyTableMetadata(int i) { + if (i == 0) { + return new TableMetadata('catalog', new ConnectorTableDetailMetadata( + new SchemaTableName('db', 'table'), + [ + new ColumnMetadata('c1', BigintType.BIGINT, true), + new ColumnMetadata('c2', VarcharType.VARCHAR, true) + ], + 'owner', + new StorageInfo('siUri', 'siIF', 'siOF', 'siSL', ['sik1': 'siv1'], ['sipk1': 'sipv1']), + ['mdc1': 'mdv1'], + new AuditInfo( + 'dwatson', + 'amajumdar', + LocalDateTime.of(2015, Month.APRIL, 8, 4, 33, 47).toInstant(PACIFIC).getEpochSecond(), + LocalDateTime.of(2015, Month.APRIL, 8, 4, 51, 12).toInstant(PACIFIC).getEpochSecond() + ) + )) + } + } + + PartitionDto getDummyPartitionDto(int i) { + if (i == 0) { + return new PartitionDto( + audit: new AuditDto( + createdBy: 'dwatson', + createdDate: Date.from(LocalDateTime.of(2015, Month.APRIL, 8, 4, 33, 47).toInstant(PACIFIC)), + lastModifiedBy: 'amajumdar', + lastModifiedDate: Date.from(LocalDateTime.of(2015, Month.APRIL, 8, 4, 51, 12).toInstant(PACIFIC)), + ), + dataMetadata: null, + definitionMetadata: null, + name: QualifiedName.fromString('catalog/db/table/partition'), + serde: new StorageDto( + inputFormat: 'siIF', + outputFormat: 'siOF', + owner: 'dwatson', + parameters: ['sik1': 'siv1'], + serdeInfoParameters: ['sipk1': 'sipv1'], + serializationLib: 'siSL', + uri: 'siUri' + ) + ) + } + + } + + ConnectorPartition getDummyPrestoPartition(int i) { + if (i == 0) { + return new ConnectorPartitionDetailImpl( + 'partition', + TupleDomain.none(), + new StorageInfo('siUri', 'siIF', 'siOF', 'siSL', ['sik1': 'siv1'], ['sipk1': 'sipv1']), + null, + new AuditInfo( + 'dwatson', + 'amajumdar', + LocalDateTime.of(2015, Month.APRIL, 8, 4, 33, 47).toInstant(PACIFIC).getEpochSecond(), + LocalDateTime.of(2015, Month.APRIL, 8, 4, 51, 12).toInstant(PACIFIC).getEpochSecond() + ) + ) + } + + } + + def "make sure it is possible to get a converters instance"() { + expect: "Expect converter to be created - if it is not the conversion has java errors in the source generation" + converter + } + + @Unroll + def "test converting '#name' to a QualifiedTableName"() { + given: + def actualResult = null + + when: + try { + actualResult = converter.getQualifiedTableName(name) + } catch (Throwable t) { + actualResult = t + } + + then: + if (expectedResult instanceof Exception) { + assert actualResult.class == expectedResult.class + } else { + actualResult == expectedResult + } + + where: + name | expectedResult + null | new NullPointerException('name cannot be null') + QualifiedName.fromString('catalog') | new IllegalStateException('Not a table definition: catalog') + QualifiedName.fromString('catalog/database') | new IllegalStateException('Not a table definition: catalog/database') + QualifiedName.fromString('catalog/database/table/partition') | new QualifiedTableName('catalog', 'database', 'table') + QualifiedName.fromString('catalog/database/table') | new QualifiedTableName('catalog', 'database', 'tablez') + } + + @Unroll + def "can convert a presto table for '#name' into our TableDTO"() { + given: + + expect: + def actualDto = converter.toTableDto(name, type, prestoTM) + objectsSame(actualDto, expectedDto) + + where: + 
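+        // a null TableMetadata maps to a null DTO; otherwise the dummy fixtures must round-trip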
expectedDto | name | type | prestoTM + null | null | null | null + getDummyTableDto(0) | QualifiedName.fromString('catalog/db/table') | null | getDummyTableMetadata(0) + } + + def "can convert a QualifiedTableName to a QualifiedName"() { + expect: + QualifiedName.fromString('catalog/db/table') == converter.toQualifiedName(new QualifiedTableName('catalog', 'db', 'table')) + } + + @Unroll + def "can convert our tableDto into a presto table for '#name'"() { + given: + + expect: + def actualPrestoTM = converter.fromTableDto(name, tableDto, type) + objectsSame(actualPrestoTM, expectedPrestoTM) + + where: + expectedPrestoTM | name | type | tableDto + getDummyTableMetadata(0) | QualifiedName.fromString('catalog/db/table') | typeManager | getDummyTableDto(0) + } + + @Unroll + def "can convert a presto partition into our PartitionDto"() { + given: + + expect: + def actualDto = converter.toPartitionDto(name, prestoPartition) + objectsSame(actualDto, expectedDto) + + where: + expectedDto | name | prestoPartition + getDummyPartitionDto(0) | QualifiedName.fromString('catalog/db/table/partition') | getDummyPrestoPartition(0) + } +} diff --git a/metacat-hive-connector/build.gradle b/metacat-hive-connector/build.gradle new file mode 100644 index 000000000..a078e6b2b --- /dev/null +++ b/metacat-hive-connector/build.gradle @@ -0,0 +1,11 @@ +dependencies { + compile project(':metacat-common-server') + compile project(':metacat-converters') + compile "com.google.guava:guava:${guava_version}" + compile "com.google.inject:guice:${guice_version}" + compile "com.google.inject.extensions:guice-persist:${guice_version}" + compile "com.google.inject.extensions:guice-multibindings:${guice_version}" + compile "com.google.inject.extensions:guice-servlet:${guice_version}" + compile "com.facebook.presto:presto-spi:${presto_version}" + compile "com.facebook.presto:presto-hive-hadoop2:${presto_version}" +} diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/BaseMetacatHiveMetastore.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/BaseMetacatHiveMetastore.java new file mode 100644 index 000000000..6c682fd05 --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/BaseMetacatHiveMetastore.java @@ -0,0 +1,173 @@ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.exception.InvalidMetaException; +import com.facebook.presto.exception.SchemaAlreadyExistsException; +import com.facebook.presto.hive.ForHiveMetastore; +import com.facebook.presto.hive.HiveClientConfig; +import com.facebook.presto.hive.HiveCluster; +import com.facebook.presto.hive.HiveMetastoreClient; +import com.facebook.presto.hive.TableAlreadyExistsException; +import com.facebook.presto.hive.metastore.CachingHiveMetastore; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaNotFoundException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.TableNotFoundException; +import io.airlift.units.Duration; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import 
org.apache.hadoop.hive.metastore.api.RequestPartsSpec; +import org.apache.hadoop.hive.metastore.api.Table; + +import javax.inject.Inject; +import java.util.List; +import java.util.concurrent.ExecutorService; + +import static com.facebook.presto.hive.HiveErrorCode.HIVE_METASTORE_ERROR; + +/** + * Created by amajumdar on 1/16/15. + */ +public class BaseMetacatHiveMetastore extends CachingHiveMetastore implements MetacatHiveMetastore{ + + @Inject + public BaseMetacatHiveMetastore(HiveCluster hiveCluster, + @ForHiveMetastore + ExecutorService executor, HiveClientConfig hiveClientConfig) { + super(hiveCluster, executor, hiveClientConfig); + } + + public BaseMetacatHiveMetastore(HiveCluster hiveCluster, ExecutorService executor, Duration cacheTtl, + Duration refreshInterval) { + super(hiveCluster, executor, cacheTtl, refreshInterval); + } + + public void createDatabase(Database database){ + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + client.create_database(database); + } catch (MetaException | InvalidObjectException e) { + throw new InvalidMetaException("Invalid metadata for " + database.getName(), e); + } catch (AlreadyExistsException e) { + throw new SchemaAlreadyExistsException(database.getName(), e); + } catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public void updateDatabase(Database database){ + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + client.alter_database(database.getName(), database); + } catch (NoSuchObjectException e) { + throw new SchemaNotFoundException(database.getName(), e); + } catch (MetaException | InvalidObjectException e) { + throw new InvalidMetaException("Invalid metadata for " + database.getName(), e); + } catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public void dropDatabase(String dbName) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + client.drop_database(dbName, false, false); + } + catch (NoSuchObjectException e) { + throw new SchemaNotFoundException(dbName); + }catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public void alterTable(final Table table) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + client.alter_table(table.getDbName(), table.getTableName(), table); + } catch (NoSuchObjectException e) { + throw new TableNotFoundException(new SchemaTableName(table.getDbName(), table.getTableName()), e); + } catch (AlreadyExistsException e) { + throw new TableAlreadyExistsException(new SchemaTableName(table.getDbName(), table.getTableName())); + } catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + @Override + public List getTablesByNames(String dbName, List tableNames) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + return client.get_table_objects_by_name( dbName, tableNames); + } catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public List getPartitions(String dbName, String tableName, String filter) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + return client.get_partitions_by_filter( dbName, tableName, filter, (short)0); + } catch (NoSuchObjectException e) { + throw new TableNotFoundException(new SchemaTableName(dbName, tableName), e); + }catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public List getPartitions(String dbName, String 
tableName, List partitionIds) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + return client.get_partitions_by_names( dbName, tableName, partitionIds); + } catch (NoSuchObjectException e) { + throw new TableNotFoundException(new SchemaTableName(dbName, tableName), e); + }catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + @Override + public void addDropPartitions(String dbName, String tableName, + List partitions, + List delPartitionNames) throws NoSuchObjectException, AlreadyExistsException { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + _dropPartitions( client, dbName, tableName, delPartitionNames); + client.add_partitions(partitions); + } catch (MetaException | InvalidObjectException e) { + throw new InvalidMetaException("One or more partitions are invalid.", e); + } catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public void savePartitions(List partitions) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + client.add_partitions(partitions); + } catch (MetaException | InvalidObjectException e) { + throw new InvalidMetaException("One or more partitions are invalid.", e); + } catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + public void dropPartitions( String dbName, String tableName, List partitionNames) { + try (HiveMetastoreClient client = clientProvider.createMetastoreClient()){ + _dropPartitions( client, dbName, tableName, partitionNames); + } catch (NoSuchObjectException e) { + throw new TableNotFoundException(new SchemaTableName(dbName, tableName), e); + }catch (Exception e) { + throw new PrestoException(HIVE_METASTORE_ERROR, e); + } + } + + private void _dropPartitions(HiveMetastoreClient client, String dbName, String tableName, List partitionNames) + throws Exception { + if( partitionNames != null && !partitionNames.isEmpty()) { + DropPartitionsRequest request = new DropPartitionsRequest(dbName, tableName, new RequestPartsSpec( + RequestPartsSpec._Fields.NAMES, partitionNames)); + request.setDeleteData(false); + client.drop_partitions_req(request); + } + } + + @Override + public void flushCache() { + //no op + } +} diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/HiveDetailMetadata.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/HiveDetailMetadata.java new file mode 100644 index 000000000..ceeb60096 --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/HiveDetailMetadata.java @@ -0,0 +1,378 @@ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.exception.SchemaAlreadyExistsException; +import com.facebook.presto.hive.ForHiveClient; +import com.facebook.presto.hive.HdfsEnvironment; +import com.facebook.presto.hive.HiveClientConfig; +import com.facebook.presto.hive.HiveColumnHandle; +import com.facebook.presto.hive.HiveConnectorId; +import com.facebook.presto.hive.HiveMetadata; +import com.facebook.presto.hive.HivePartitionManager; +import com.facebook.presto.hive.HiveStorageFormat; +import com.facebook.presto.hive.HiveTableHandle; +import com.facebook.presto.hive.metastore.HiveMetastore; +import com.facebook.presto.spi.ColumnDetailMetadata; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorDetailMetadata; +import com.facebook.presto.spi.ConnectorSchemaMetadata; +import com.facebook.presto.spi.ConnectorSession; 
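Every metastore call in BaseMetacatHiveMetastore above follows the same shape: open a Thrift client in a try-with-resources block, invoke it, and translate Hive's checked exceptions into Presto SPI exceptions. A minimal sketch of that shape as a reusable helper, assuming HiveMetastoreClient is AutoCloseable (it is used in try-with-resources above) and reusing the class's clientProvider; the ClientAction interface and the withClient name are invented here and are not part of this change:

    // Sketch only: 'ClientAction' and 'withClient' are assumed names, not in the patch.
    @FunctionalInterface
    interface ClientAction<T> {
        T call(HiveMetastoreClient client) throws Exception;
    }

    private <T> T withClient(ClientAction<T> action) {
        // clientProvider and HIVE_METASTORE_ERROR are used exactly as in the class above
        try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
            return action.call(client);
        } catch (Exception e) {
            throw new PrestoException(HIVE_METASTORE_ERROR, e);
        }
    }

    // e.g. getTablesByNames could then collapse to:
    // return withClient(client -> client.get_table_objects_by_name(dbName, tableNames));

The more specific translations (for example NoSuchObjectException to SchemaNotFoundException or TableNotFoundException) would still be caught per call site, as the methods above do.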
+import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaNotFoundException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.base.Function; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.netflix.metacat.converters.TypeConverterProvider; +import com.netflix.metacat.hive.connector.util.ConverterUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; + +import javax.inject.Inject; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; + +import static com.facebook.presto.hive.HiveErrorCode.HIVE_INVALID_METADATA; +import static com.facebook.presto.hive.HiveErrorCode.HIVE_UNSUPPORTED_FORMAT; +import static com.facebook.presto.hive.HiveUtil.hiveColumnHandles; +import static com.facebook.presto.hive.HiveUtil.schemaTableName; +import static com.facebook.presto.hive.util.Types.checkType; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Strings.isNullOrEmpty; +import static com.google.common.collect.Iterables.concat; +import static com.google.common.collect.Iterables.transform; +import static java.lang.String.format; + +/** + * Created by amajumdar on 2/4/15. 
+ */ +public class HiveDetailMetadata extends HiveMetadata implements ConnectorDetailMetadata { + public static final String PARAMETER_COMMENT = "comment"; + public static final String PARAMETER_EXTERNAL = "EXTERNAL"; + protected final HiveMetastore metastore; + protected final TypeConverterProvider typeConverterProvider; + protected final TypeManager typeManager; + protected final HiveConnectorId connectorId; + protected HiveStorageFormat hiveStorageFormat; + + @Inject + public HiveDetailMetadata(HiveConnectorId connectorId, + HiveClientConfig hiveClientConfig, + HiveMetastore metastore, + HdfsEnvironment hdfsEnvironment, + HivePartitionManager partitionManager, + TypeConverterProvider typeConverterProvider, + @ForHiveClient + ExecutorService executorService, TypeManager typeManager) { + super(connectorId, hiveClientConfig, metastore, hdfsEnvironment, partitionManager, executorService, typeManager); + this.metastore = metastore; + this.typeConverterProvider = typeConverterProvider; + this.typeManager = typeManager; + this.connectorId = connectorId; + } + + @Override + public void createSchema(ConnectorSession session, ConnectorSchemaMetadata schema) { + checkNotNull(schema.getSchemaName(), "Schema name is null"); + try { + Database database = new Database(schema.getSchemaName(), null, schema.getUri(), schema.getMetadata()); + ((MetacatHiveMetastore)metastore).createDatabase(database); + } catch (AlreadyExistsException e) { + throw new SchemaAlreadyExistsException(schema.getSchemaName()); + } + } + + @Override + public void updateSchema(ConnectorSession session, ConnectorSchemaMetadata schema) { + checkNotNull(schema.getSchemaName(), "Schema name is null"); + try { + Database database = new Database(schema.getSchemaName(), null, schema.getUri(), schema.getMetadata()); + ((MetacatHiveMetastore)metastore).updateDatabase(database); + } catch (NoSuchObjectException e) { + throw new SchemaNotFoundException(schema.getSchemaName()); + } + } + + @Override + public void dropSchema(ConnectorSession session, String schemaName) { + checkNotNull( schemaName, "Schema name is null"); + try { + ((MetacatHiveMetastore)metastore).dropDatabase(schemaName); + } catch (NoSuchObjectException e) { + throw new SchemaNotFoundException(schemaName); + } + } + + @Override + public ConnectorSchemaMetadata getSchema(ConnectorSession session, String schemaName) { + checkNotNull( schemaName, "Schema name is null"); + Database database = metastore.getDatabase( schemaName).orElseThrow(() -> new SchemaNotFoundException(schemaName)); + return new ConnectorSchemaMetadata( schemaName, database.getLocationUri(), database.getParameters()); + } + + @Override + public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle tableHandle) { + checkNotNull(tableHandle, "tableHandle is null"); + SchemaTableName tableName = schemaTableName(tableHandle); + return getTableMetadata(tableName); + } + + private ConnectorTableDetailMetadata getTableMetadata(SchemaTableName tableName) + { + Optional
oTable = getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()); + Table table = oTable.orElseThrow(() -> new TableNotFoundException(tableName)); + List columns = null; + try { + if (table.getSd().getColsSize() == 0) { + List handles = hiveColumnHandles(typeManager, connectorId.toString(), table, + false); + columns = ImmutableList + .copyOf(transform(handles, columnMetadataGetter(table, typeManager))); + } + } catch (Exception ignored) { + // Ignore the error. It could be that the table is corrupt. + } + + if (columns == null) { + if (table.getSd().getColsSize() != 0) { + columns = ConverterUtil.toColumnMetadatas(table, typeConverterProvider, typeManager); + } else { + columns = Lists.newArrayList(); + } + } + + return new ConnectorTableDetailMetadata(tableName, columns, table.getOwner(), + ConverterUtil.toStorageInfo(table.getSd()), table.getParameters(), + ConverterUtil.toAuditInfo(table)); + } + + static Function columnMetadataGetter(Table table, final TypeManager typeManager) + { + ImmutableMap.Builder builder = ImmutableMap.builder(); + for (FieldSchema field : concat(table.getSd().getCols(), table.getPartitionKeys())) { + if (field.getComment() != null) { + builder.put(field.getName(), field.getComment()); + } + } + final Map columnComment = builder.build(); + + return input -> new ColumnDetailMetadata( + input.getName(), + typeManager.getType(input.getTypeSignature()), + input.isPartitionKey(), + columnComment.get(input.getName()), + false, + input.getHiveType().getHiveTypeName()); + } + + @Override + public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) + { + ConnectorTableDetailMetadata tableDetailMetadata = (ConnectorTableDetailMetadata)tableMetadata; + checkArgument(!isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty"); + + Table table = new Table(); + table.setDbName(tableMetadata.getTable().getSchemaName()); + table.setTableName(tableMetadata.getTable().getTableName()); + table.setOwner(tableMetadata.getOwner()); + table.setTableType(TableType.EXTERNAL_TABLE.toString()); + updateTable( table, session, tableDetailMetadata); + + metastore.createTable(table); + } + + private void updateTable(Table table, ConnectorSession session, ConnectorTableDetailMetadata tableDetailMetadata) { + Map params = table.getParameters(); + if( params == null){ + params = Maps.newHashMap(); + table.setParameters( params); + } + if( tableDetailMetadata.getMetadata() != null) { + Object oComment = tableDetailMetadata.getMetadata().get(PARAMETER_COMMENT); + if (oComment != null) { + params.put(PARAMETER_COMMENT, String.valueOf(oComment)); + } + } + if (params.get(PARAMETER_EXTERNAL) == null) { + params.put(PARAMETER_EXTERNAL, "TRUE"); + } + //storage + StorageDescriptor sd = table.getSd()!= null?table.getSd():new StorageDescriptor(); + String inputFormat = null; + String outputFormat = null; + String location = tableDetailMetadata.getStorageInfo()==null?null:tableDetailMetadata.getStorageInfo().getUri(); + if( location != null){ + sd.setLocation(location); + } else if(sd.getLocation() == null){ + String locationStr = getDatabase(tableDetailMetadata.getTable().getSchemaName()).getLocationUri(); + Path databasePath = new Path(locationStr); + Path targetPath = new Path(databasePath, tableDetailMetadata.getTable().getTableName()); + sd.setLocation(targetPath.toString()); + } + + if( location != null){ + SerDeInfo serdeInfo = sd.getSerdeInfo(); + StorageInfo storageInfo = tableDetailMetadata.getStorageInfo(); + if( serdeInfo != 
null){ + serdeInfo.setName(tableDetailMetadata.getTable().getTableName()); + if (storageInfo != null) { + if(!Strings.isNullOrEmpty(storageInfo.getSerializationLib())) { + serdeInfo.setSerializationLib(storageInfo.getSerializationLib()); + } + if(storageInfo.getParameters() != null && !storageInfo.getParameters().isEmpty()) { + serdeInfo.setParameters(storageInfo.getParameters()); + } + inputFormat = storageInfo.getInputFormat(); + outputFormat = storageInfo.getOutputFormat(); + } + } else { + serdeInfo = new SerDeInfo(); + serdeInfo.setName(tableDetailMetadata.getTable().getTableName()); + if (storageInfo != null) { + serdeInfo.setSerializationLib(storageInfo.getSerializationLib()); + serdeInfo.setParameters(storageInfo.getParameters()); + inputFormat = storageInfo.getInputFormat(); + outputFormat = storageInfo.getOutputFormat(); + } else { + HiveStorageFormat hiveStorageFormat = extractHiveStorageFormat(table); + serdeInfo.setSerializationLib(hiveStorageFormat.getSerDe()); + serdeInfo.setParameters(ImmutableMap.of()); + inputFormat = hiveStorageFormat.getInputFormat(); + outputFormat = hiveStorageFormat.getOutputFormat(); + } + sd.setSerdeInfo(serdeInfo); + } + } + + ImmutableList.Builder columnsBuilder = ImmutableList.builder(); + ImmutableList.Builder partitionKeysBuilder = ImmutableList.builder(); + for( ColumnMetadata column: tableDetailMetadata.getColumns()){ + FieldSchema field = ConverterUtil.toFieldSchema(column); + if( column.isPartitionKey()){ + partitionKeysBuilder.add(field); + }else { + columnsBuilder.add(field); + } + } + ImmutableList columns = columnsBuilder.build(); + if( !columns.isEmpty()) { + sd.setCols(columns); + } + if( !Strings.isNullOrEmpty(inputFormat)) { + sd.setInputFormat(inputFormat); + } + if( !Strings.isNullOrEmpty(outputFormat)) { + sd.setOutputFormat(outputFormat); + } + if(sd.getParameters() == null) { + sd.setParameters(ImmutableMap.of()); + } + + //partition keys + ImmutableList partitionKeys = partitionKeysBuilder.build(); + if( !partitionKeys.isEmpty()) { + table.setPartitionKeys(partitionKeys); + } + table.setSd(sd); + } + + private static HiveStorageFormat extractHiveStorageFormat(Table table) + { + StorageDescriptor descriptor = table.getSd(); + if (descriptor == null) { + throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor"); + } + SerDeInfo serdeInfo = descriptor.getSerdeInfo(); + if (serdeInfo == null) { + throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info"); + } + String outputFormat = descriptor.getOutputFormat(); + String serializationLib = serdeInfo.getSerializationLib(); + + for (HiveStorageFormat format : HiveStorageFormat.values()) { + if (format.getOutputFormat().equals(outputFormat) && format.getSerDe().equals(serializationLib)) { + return format; + } + } + throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, format("Output format %s with SerDe %s is not supported", outputFormat, serializationLib)); + } + + @Override + public ConnectorTableHandle alterTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) { + Optional
oTable = metastore.getTable(tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName()); + Table table = oTable.orElseThrow(() -> new TableNotFoundException(tableMetadata.getTable())); + try { + if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name())) { + throw new TableNotFoundException(tableMetadata.getTable()); + } + updateTable(table, session, (ConnectorTableDetailMetadata) tableMetadata); + ((MetacatHiveMetastore)metastore).alterTable(table); + return new HiveTableHandle(connectorId.toString(), tableMetadata.getTable().getSchemaName(), tableMetadata.getTable().getTableName()); + } + catch (NoSuchObjectException e) { + throw new TableNotFoundException(tableMetadata.getTable()); + } + } + + /** + * Deletes the table from hive. If the table is corrupted, it will still delete it instead of throwing an error. + * @param tableHandle table + */ + @Override + public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle) + { + HiveTableHandle handle = checkType(tableHandle, HiveTableHandle.class, "tableHandle"); + metastore.dropTable(handle.getSchemaName(), handle.getTableName()); + } + + /** + * Similar to listTables but this method will return the list of tables along with its metadata. + * @param session connector session + * @param schemaName schema name + * @return list of table metadata. + */ + @Override + public List listTableMetadatas(ConnectorSession session, String schemaName, List tableNames) { + List
tables = ((MetacatHiveMetastore)metastore).getTablesByNames( schemaName, tableNames); + if( tables != null){ + return tables.stream().map(table -> { + List columns; + if( table.getSd().getColsSize() == 0) { + List handles = hiveColumnHandles(typeManager, connectorId.toString(), table, false); + columns = ImmutableList + .copyOf(transform(handles, columnMetadataGetter(table, typeManager))); + } else { + columns = ConverterUtil.toColumnMetadatas(table, typeConverterProvider, typeManager); + } + SchemaTableName tableName = new SchemaTableName( schemaName, table.getTableName()); + return new ConnectorTableDetailMetadata(tableName, columns, table.getOwner(), + ConverterUtil.toStorageInfo(table.getSd()), Maps.newHashMap(), ConverterUtil.toAuditInfo(table)); + }).collect(Collectors.toList()); + } + return Lists.newArrayList(); + } + + private Database getDatabase(String database) + { + return metastore.getDatabase(database).orElseThrow(() -> new SchemaNotFoundException(database)); + } +} diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/HiveSplitDetailManager.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/HiveSplitDetailManager.java new file mode 100644 index 000000000..da972d405 --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/HiveSplitDetailManager.java @@ -0,0 +1,286 @@ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.exception.PartitionAlreadyExistsException; +import com.facebook.presto.exception.PartitionNotFoundException; +import com.facebook.presto.hive.DirectoryLister; +import com.facebook.presto.hive.ForHiveClient; +import com.facebook.presto.hive.HdfsEnvironment; +import com.facebook.presto.hive.HiveClientConfig; +import com.facebook.presto.hive.HiveConnectorId; +import com.facebook.presto.hive.HiveSplitManager; +import com.facebook.presto.hive.HiveUtil; +import com.facebook.presto.hive.NamenodeStats; +import com.facebook.presto.hive.metastore.HiveMetastore; +import com.facebook.presto.spi.AuditInfo; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionDetail; +import com.facebook.presto.spi.ConnectorPartitionDetailImpl; +import com.facebook.presto.spi.ConnectorPartitionResult; +import com.facebook.presto.spi.ConnectorSplitDetailManager; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.SavePartitionResult; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.Sort; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.TupleDomain; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.netflix.metacat.common.partition.util.PartitionUtil; +import com.netflix.metacat.hive.connector.util.ConverterUtil; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; + +import javax.inject.Inject; +import 
java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; + +import static com.facebook.presto.hive.HiveUtil.schemaTableName; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 2/4/15. + */ +public class HiveSplitDetailManager extends HiveSplitManager implements ConnectorSplitDetailManager{ + + protected final HiveMetastore metastore; + @Inject + public HiveSplitDetailManager(HiveConnectorId connectorId, + HiveClientConfig hiveClientConfig, + HiveMetastore metastore, + NamenodeStats namenodeStats, HdfsEnvironment hdfsEnvironment, + DirectoryLister directoryLister, + @ForHiveClient + ExecutorService executorService) { + super(connectorId, hiveClientConfig, metastore, namenodeStats, hdfsEnvironment, directoryLister, + executorService); + this.metastore = metastore; + } + + @Override + public ConnectorPartitionResult getPartitions(ConnectorTableHandle table, final String filterExpression + , List partitionIds, Sort sort, Pageable pageable, boolean includePartitionDetails) { + SchemaTableName schemaTableName = HiveUtil.schemaTableName(table); + List partitions = getPartitions( schemaTableName, filterExpression + , partitionIds, sort, pageable, includePartitionDetails); + return new ConnectorPartitionResult( partitions, TupleDomain.none()); + } + + private List getPartitions(SchemaTableName schemaTableName, String filterExpression, + List partitionIds, + Sort sort, Pageable pageable, + boolean includePartitionDetails) { + List result = Lists.newArrayList(); + List queryPartitionIds = Lists.newArrayList(); + if (!Strings.isNullOrEmpty(filterExpression)) { + queryPartitionIds = metastore + .getPartitionNamesByParts(schemaTableName.getSchemaName(), schemaTableName.getTableName(), + Lists.newArrayList(PartitionUtil.getPartitionKeyValues(filterExpression).values())).orElse(Lists.newArrayList()); + } + if (partitionIds != null) { + queryPartitionIds.addAll(partitionIds); + } else { + queryPartitionIds.addAll(metastore.getPartitionNames(schemaTableName.getSchemaName(), + schemaTableName.getTableName()).orElse(Lists.newArrayList())); + } + Map partitionMap = getPartitionsByNames( + schemaTableName.getSchemaName(), schemaTableName.getTableName(), + queryPartitionIds); + Map> domainMap = Maps.newHashMapWithExpectedSize(1); + domainMap.put(new ColumnHandle(){}, "ignore"); + TupleDomain tupleDomain = TupleDomain.withFixedValues(domainMap); + partitionMap.forEach((s, partition) -> { + StorageDescriptor sd = partition.getSd(); + StorageInfo storageInfo = null; + if( sd != null){ + storageInfo = new StorageInfo(); + storageInfo.setUri(sd.getLocation()); + storageInfo.setInputFormat(sd.getInputFormat()); + storageInfo.setOutputFormat(sd.getOutputFormat()); + storageInfo.setParameters(sd.getParameters()); + SerDeInfo serDeInfo = sd.getSerdeInfo(); + if (serDeInfo != null) { + storageInfo.setSerializationLib(serDeInfo.getSerializationLib()); + storageInfo.setSerdeInfoParameters(serDeInfo.getParameters()); + } + } + AuditInfo auditInfo = new AuditInfo(); + auditInfo.setCreatedDate((long)partition.getCreateTime()); + auditInfo.setLastUpdatedDate((long) partition.getLastAccessTime()); + result.add( new ConnectorPartitionDetailImpl(s, tupleDomain, storageInfo, partition.getParameters(), auditInfo)); + }); + return result; + } + + protected Map getPartitionsByNames(String schemaName, String tableName, List partitionNames) + 
{ + return metastore.getPartitionsByNames(schemaName, tableName, partitionNames).orElse(Maps.newHashMap()); + } + + @Override + public SavePartitionResult savePartitions(ConnectorTableHandle tableHandle, List partitions + , List partitionIdsForDeletes, boolean checkIfExists) { + checkNotNull(tableHandle, "tableHandle is null"); + SavePartitionResult result = new SavePartitionResult(); + SchemaTableName tableName = schemaTableName(tableHandle); + Optional
oTable = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); + Table table = oTable.orElseThrow(() -> new TableNotFoundException(tableName)); + try { + // New partition ids + List addedPartitionIds = Lists.newArrayList(); + // Updated partition ids + List existingPartitionIds = Lists.newArrayList(); + // New partitions + List hivePartitions = Lists.newArrayList(); + // Existing partition map + Map existingPartitionMap = Maps.newHashMap(); + if( checkIfExists) { + List partitionNames = partitions.stream().map( + partition -> { + String partitionName = partition.getPartitionId(); + PartitionUtil + .validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys())); + return partitionName; + }).collect(Collectors.toList()); + existingPartitionMap = getPartitionsByNames(tableName.getSchemaName(), tableName.getTableName(), + partitionNames); + } + for(ConnectorPartition partition:partitions){ + String partitionName = partition.getPartitionId(); + Partition hivePartition = existingPartitionMap.get(partitionName); + if(hivePartition == null){ + addedPartitionIds.add(partitionName); + hivePartitions.add(ConverterUtil.toPartition(tableName, partition)); + } else { + ConnectorPartitionDetail partitionDetail = (ConnectorPartitionDetail) partition; + String partitionUri = getUri(partitionDetail); + String hivePartitionUri = getUri(hivePartition); + if( partitionUri == null || !partitionUri.equals( hivePartitionUri)){ + existingPartitionIds.add(partitionName); + hivePartitions.add(ConverterUtil.toPartition(tableName, partition)); + } + } + } + Set deletePartitionIds = Sets.newHashSet(); + deletePartitionIds.addAll(existingPartitionIds); + if( partitionIdsForDeletes != null){ + deletePartitionIds.addAll(partitionIdsForDeletes); + } + + // + // Update the partition info based on that of the table. 
+ // + for (Partition partition : hivePartitions) { + StorageDescriptor sd = partition.getSd(); + StorageDescriptor tableSdCopy = table.getSd().deepCopy(); + if (tableSdCopy.getSerdeInfo() == null) { + SerDeInfo serDeInfo = new SerDeInfo(null, null, Collections.emptyMap()); + tableSdCopy.setSerdeInfo(serDeInfo); + } + + tableSdCopy.setLocation(sd.getLocation()); + if (!Strings.isNullOrEmpty(sd.getInputFormat())) { + tableSdCopy.setInputFormat(sd.getInputFormat()); + } + if (!Strings.isNullOrEmpty(sd.getOutputFormat())) { + tableSdCopy.setOutputFormat(sd.getOutputFormat()); + } + if (sd.getParameters() != null) { + tableSdCopy.setParameters(sd.getParameters()); + } + if (sd.getSerdeInfo() != null) { + if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getName())) { + tableSdCopy.getSerdeInfo().setName(sd.getSerdeInfo().getName()); + } + if (!Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) { + tableSdCopy.getSerdeInfo().setSerializationLib(sd.getSerdeInfo().getSerializationLib()); + } + if (sd.getSerdeInfo().getParameters() != null) { + tableSdCopy.getSerdeInfo().setParameters(sd.getSerdeInfo().getParameters()); + } + } + partition.setSd(tableSdCopy); + } + + ((MetacatHiveMetastore)metastore).addDropPartitions(tableName.getSchemaName() + , tableName.getTableName(), hivePartitions, Lists.newArrayList(deletePartitionIds)); + + result.setAdded( addedPartitionIds); + result.setUpdated( existingPartitionIds); + } catch (NoSuchObjectException e) { + throw new TableNotFoundException(tableName); + } catch (AlreadyExistsException e) { + throw new PartitionAlreadyExistsException(tableName, null, e); + } + return result; + } + + private String getUri(Partition hivePartition) { + String result = null; + if( hivePartition.getSd() != null){ + result = hivePartition.getSd().getLocation(); + } + return result; + } + + private String getUri(ConnectorPartitionDetail partitionDetail) { + String result = null; + if( partitionDetail.getStorageInfo() != null){ + result = partitionDetail.getStorageInfo().getUri(); + } + return result; + } + + public List getPartitionKeys(List fields) { + List result = Lists.newArrayList(); + if (fields != null) { + result.addAll(fields.stream().map(FieldSchema::getName).collect(Collectors.toList())); + } + return result; + } + + /** + * Delete partitions for a table + + * @param tableHandle table handle + * @param partitionIds list of partition names + */ + @Override + public void deletePartitions(ConnectorTableHandle tableHandle, List partitionIds) { + if (!(metastore instanceof MetacatHiveMetastore)) { + throw new IllegalStateException("This metastore does not implement dropPartitions"); + } + checkNotNull(tableHandle, "tableHandle is null"); + SchemaTableName tableName = schemaTableName(tableHandle); + try { + ((MetacatHiveMetastore)metastore).dropPartitions( tableName.getSchemaName(), tableName.getTableName(), partitionIds); + } catch (NoSuchObjectException e) { + throw new PartitionNotFoundException(tableName, partitionIds.toString()); + } + } + + /** + * Number of partitions for the given table + * @param connectorHandle table handle + * @return Number of partitions + */ + @Override + public Integer getPartitionCount(ConnectorTableHandle connectorHandle) { + SchemaTableName schemaTableName = HiveUtil.schemaTableName(connectorHandle); + return metastore.getPartitionNames(schemaTableName.getSchemaName(), schemaTableName.getTableName()).orElse(Lists.newArrayList()).size(); + } +} diff --git 
a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveClientModule.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveClientModule.java new file mode 100644 index 000000000..bddd84748 --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveClientModule.java @@ -0,0 +1,26 @@ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.hive.NoAccessControl; +import com.facebook.presto.hive.metastore.HiveMetastore; +import com.facebook.presto.spi.ConnectorMetadata; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.security.ConnectorAccessControl; +import com.google.inject.Binder; +import com.google.inject.Module; +import com.google.inject.Scopes; + +/** + * Created by amajumdar on 4/17/15. + */ +public class MetacatHiveClientModule implements Module { + + @Override + public void configure(Binder binder) { + binder.bind(HiveMetastore.class).to(BaseMetacatHiveMetastore.class).in(Scopes.SINGLETON); + + binder.bind(ConnectorMetadata.class).to(HiveDetailMetadata.class).in(Scopes.SINGLETON); + binder.bind(ConnectorSplitManager.class).to(HiveSplitDetailManager.class).in(Scopes.SINGLETON); + + binder.bind(ConnectorAccessControl.class).to(NoAccessControl.class).in(Scopes.SINGLETON); + } +} diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveConnectorFactory.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveConnectorFactory.java new file mode 100644 index 000000000..4f5e2a972 --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveConnectorFactory.java @@ -0,0 +1,117 @@ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.hive.HiveClientModule; +import com.facebook.presto.hive.HiveConnector; +import com.facebook.presto.hive.HiveSessionProperties; +import com.facebook.presto.hive.HiveTableProperties; +import com.facebook.presto.hive.RebindSafeMBeanServer; +import com.facebook.presto.spi.Connector; +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.ConnectorHandleResolver; +import com.facebook.presto.spi.ConnectorMetadata; +import com.facebook.presto.spi.ConnectorPageSourceProvider; +import com.facebook.presto.spi.ConnectorRecordSinkProvider; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.classloader.ThreadContextClassLoader; +import com.facebook.presto.spi.security.ConnectorAccessControl; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableSet; +import com.google.inject.Injector; +import com.google.inject.Module; +import com.google.inject.util.Modules; +import com.netflix.metacat.common.server.CommonModule; +import com.netflix.metacat.converters.ConvertersModule; +import io.airlift.bootstrap.Bootstrap; +import io.airlift.bootstrap.LifeCycleManager; +import io.airlift.json.JsonModule; +import io.airlift.node.NodeModule; +import org.weakref.jmx.guice.MBeanModule; + +import javax.management.MBeanServer; +import java.lang.management.ManagementFactory; +import java.util.Map; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Strings.isNullOrEmpty; + +/** + * Created by amajumdar on 1/21/15. 
+ */ +public class MetacatHiveConnectorFactory implements ConnectorFactory { + private final String name; + private final Map optionalConfig; + private final ClassLoader classLoader; + private final TypeManager typeManager; + + public MetacatHiveConnectorFactory(String name, Map optionalConfig, ClassLoader classLoader, + TypeManager typeManager) + { + checkArgument(!isNullOrEmpty(name), "name is null or empty"); + this.name = name; + this.optionalConfig = checkNotNull(optionalConfig, "optionalConfig is null"); + this.classLoader = checkNotNull(classLoader, "classLoader is null"); + this.typeManager = checkNotNull(typeManager, "typeManager is null"); + } + + @Override + public String getName() + { + return name; + } + + @Override + public Connector create(String connectorId, Map config) + { + checkNotNull(config, "config is null"); + + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { + HiveClientModule hiveClientModule = new HiveClientModule(connectorId, null, typeManager); + MetacatHiveClientModule metacatHiveClientModule = new MetacatHiveClientModule(); + Module module = Modules.override(hiveClientModule).with(metacatHiveClientModule); + Bootstrap app = new Bootstrap( + new NodeModule(), + new MBeanModule(), + new JsonModule(), + new CommonModule(), + new ConvertersModule(), + module, + binder -> { + MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer(); + binder.bind(MBeanServer.class).toInstance(new RebindSafeMBeanServer(platformMBeanServer)); + } + ); + + Injector injector = app + .doNotInitializeLogging() + .setRequiredConfigurationProperties(config) + .setOptionalConfigurationProperties(optionalConfig) + .initialize(); + LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class); + ConnectorMetadata metadata = injector.getInstance(ConnectorMetadata.class); + ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class); + ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class); + ConnectorRecordSinkProvider recordSinkProvider = injector.getInstance(ConnectorRecordSinkProvider.class); + ConnectorHandleResolver handleResolver = injector.getInstance(ConnectorHandleResolver.class); + HiveSessionProperties hiveSessionProperties = injector.getInstance(HiveSessionProperties.class); + HiveTableProperties hiveTableProperties = injector.getInstance(HiveTableProperties.class); + ConnectorAccessControl accessControl = injector.getInstance(ConnectorAccessControl.class); + + return new HiveConnector( + lifeCycleManager, + metadata, + splitManager, + connectorPageSource, + recordSinkProvider, + handleResolver, + ImmutableSet.of(), + hiveSessionProperties.getSessionProperties(), + hiveTableProperties.getTableProperties(), + accessControl); + } + catch (Throwable e) { + throw Throwables.propagate(e); + } + } +} diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveMetastore.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveMetastore.java new file mode 100644 index 000000000..4bc4678fe --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHiveMetastore.java @@ -0,0 +1,85 @@ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.hive.metastore.HiveMetastore; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Database; +import 
org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.List;
+
+/**
+ * Created by amajumdar on 4/20/15.
+ */
+public interface MetacatHiveMetastore extends HiveMetastore {
+    /**
+     * Create schema/database
+     * @param database database metadata
+     */
+    void createDatabase(Database database) throws AlreadyExistsException;
+
+    /**
+     * Update schema/database
+     * @param database database metadata
+     */
+    void updateDatabase(Database database) throws NoSuchObjectException;
+
+    /**
+     * Drop database
+     * @param dbName database name
+     */
+    void dropDatabase(String dbName) throws NoSuchObjectException;
+
+    /**
+     * Alter the given table
+     * @param table the table metadata
+     */
+    void alterTable(final Table table) throws NoSuchObjectException;
+
+    /**
+     * Returns the list of tables for the given table names
+     * @param dbName database name
+     * @param tableNames list of table names
+     * @return list of tables
+     */
+    List<Table> getTablesByNames(String dbName, List<String> tableNames);
+
+    /**
+     * Get partitions for the given database and table name using the filter expression
+     * @param dbName database name
+     * @param tableName table name
+     * @param filter filter expression (JSP comparable expression)
+     * @return list of partitions
+     */
+    List<Partition> getPartitions(String dbName, String tableName, String filter) throws NoSuchObjectException;
+
+    /**
+     * Get partitions for the list of partition names under the given database and table name.
+     * @param dbName database name
+     * @param tableName table name
+     * @param partitionIds partition ids/names
+     * @return list of partitions
+     */
+    List<Partition> getPartitions(String dbName, String tableName, List<String> partitionIds) throws NoSuchObjectException;
+
+    /**
+     * Saves the partitions.
+     * @param partitions list of partitions
+     */
+    void savePartitions(List<Partition> partitions) throws NoSuchObjectException, AlreadyExistsException;
+
+    /**
+     * Saves the given partitions and deletes the partitions named in delPartitionNames.
+     * @param dbName database name
+     * @param tableName table name
+     * @param partitions list of partitions to save
+     * @param delPartitionNames names of the partitions to delete
+     */
+    void addDropPartitions(String dbName, String tableName, List<Partition> partitions, List<String> delPartitionNames) throws NoSuchObjectException, AlreadyExistsException;
+
+    /**
+     * Drops the partitions for the given database, table and partition names.
+     * @param dbName database name
+     * @param tableName table name
+     * @param partitionNames partition ids/names
+     */
+    void dropPartitions(String dbName, String tableName, List<String> partitionNames) throws NoSuchObjectException;
+}
diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHivePlugin.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHivePlugin.java
new file mode 100644
index 000000000..190fe3a16
--- /dev/null
+++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/MetacatHivePlugin.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.netflix.metacat.hive.connector; + +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.Plugin; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import javax.inject.Inject; +import java.util.List; +import java.util.Map; + +import static com.facebook.presto.type.FloatType.FLOAT; +import static com.facebook.presto.type.IntType.INT; +import static com.google.common.base.Preconditions.checkNotNull; + +public class MetacatHivePlugin implements Plugin +{ + private Map optionalConfig = ImmutableMap.of(); + private TypeManager typeManager; + + @Inject + public void setTypeManager(TypeManager typeManager) + { + this.typeManager = checkNotNull(typeManager, "typeManager is null"); + } + + @Override + public void setOptionalConfig(Map optionalConfig) + { + this.optionalConfig = ImmutableMap.copyOf(checkNotNull(optionalConfig, "optionalConfig is null")); + } + + @Override + public List getServices(Class type) + { + if (type == ConnectorFactory.class) { + return ImmutableList.of(type.cast(new MetacatHiveConnectorFactory("metacat-hive", optionalConfig, getClassLoader(), typeManager))); + } else if (type == Type.class){ + return ImmutableList.of(type.cast(FLOAT), type.cast(INT)); + } + return ImmutableList.of(); + } + + private static ClassLoader getClassLoader() + { + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + if (classLoader == null) { + classLoader = MetacatHivePlugin.class.getClassLoader(); + } + return classLoader; + } +} diff --git a/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/util/ConverterUtil.java b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/util/ConverterUtil.java new file mode 100644 index 000000000..4d9d3f717 --- /dev/null +++ b/metacat-hive-connector/src/main/java/com/netflix/metacat/hive/connector/util/ConverterUtil.java @@ -0,0 +1,173 @@ +package com.netflix.metacat.hive.connector.util; + +import com.facebook.presto.hive.HiveType; +import com.facebook.presto.spi.AuditInfo; +import com.facebook.presto.spi.ColumnDetailMetadata; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionDetail; +import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.partition.util.PartitionUtil; +import com.netflix.metacat.converters.TypeConverterProvider; +import com.netflix.metacat.converters.impl.HiveTypeConverter; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Created by amajumdar on 2/4/15. 
+ */ +public class ConverterUtil { + private static final Logger log = LoggerFactory.getLogger(ConverterUtil.class); + + public static StorageInfo toStorageInfo(StorageDescriptor sd){ + StorageInfo result = null; + if( sd != null) { + result = new StorageInfo(); + result.setUri( sd.getLocation()); + result.setInputFormat(sd.getInputFormat()); + result.setOutputFormat(sd.getOutputFormat()); + SerDeInfo serde = sd.getSerdeInfo(); + if( serde != null){ + result.setSerializationLib(serde.getSerializationLib()); + result.setSerdeInfoParameters(serde.getParameters()); + } + result.setParameters(sd.getParameters()); + } + return result; + } + + public static StorageDescriptor fromStorageInfo(StorageInfo storageInfo){ + StorageDescriptor result = null; + if( storageInfo != null) { + result = new StorageDescriptor(); + result.setInputFormat( storageInfo.getInputFormat()); + result.setLocation(storageInfo.getUri()); + result.setOutputFormat(storageInfo.getOutputFormat()); + result.setParameters(storageInfo.getParameters()); + result.setSerdeInfo( new SerDeInfo(null, storageInfo.getSerializationLib(), storageInfo.getSerdeInfoParameters())); + } + return result; + } + + public static List toFieldSchemas(ConnectorTableDetailMetadata tableDetailMetadata) { + ImmutableList.Builder columns = ImmutableList.builder(); + for( ColumnMetadata column: tableDetailMetadata.getColumns()){ + columns.add( toFieldSchema( column)); + } + return columns.build(); + } + + public static FieldSchema toFieldSchema(ColumnMetadata column) { + return new FieldSchema(column.getName(), HiveType.toHiveType(column.getType()).getHiveTypeName(), column.getComment()); + } + + public static AuditInfo toAuditInfo(Table table) { + AuditInfo result = new AuditInfo(); + result.setCreatedBy(table.getOwner()); + result.setCreatedDate((long) table.getCreateTime()); + Map parameters = table.getParameters(); + if( parameters != null) { + result.setLastUpdatedBy(parameters.get("last_modified_by")); + Long lastModifiedDate = null; + try{ + lastModifiedDate = Long.valueOf(parameters.get("last_modified_time")); + }catch(Exception ignored){ + + } + result.setLastUpdatedDate(lastModifiedDate); + } + return result; + } + + public static Optional toColumnMetadata(FieldSchema field, TypeConverterProvider typeConverterProvider, TypeManager typeManager, int index, boolean isPartitionKey) { + String fieldType = field.getType(); + HiveTypeConverter hiveTypeConverter = (HiveTypeConverter) typeConverterProvider.get(MetacatContext.DATA_TYPE_CONTEXTS.hive); + Type type = hiveTypeConverter.toType(fieldType, typeManager); + if (type == null) { + log.debug("Unable to convert type '{}' for field '{}' to a hive type", fieldType, field.getName()); + return Optional.empty(); + } + ColumnDetailMetadata metadata = new ColumnDetailMetadata(field.getName(), type, isPartitionKey, + field.getComment(), false, fieldType); + return Optional.of(metadata); + } + + public static List toColumnMetadatas(Table table, TypeConverterProvider typeConverterProvider, TypeManager typeManager) { + List result = Lists.newArrayList(); + StorageDescriptor sd = table.getSd(); + int index = 0; + if( sd != null) { + List fields = table.getSd().getCols(); + for (FieldSchema field : fields) { + Optional columnMetadata = toColumnMetadata(field, typeConverterProvider, typeManager, index, false); + // Ignore unsupported types rather than failing + if (columnMetadata.isPresent()) { + index++; + result.add(columnMetadata.get()); + } + } + } + List pFields = table.getPartitionKeys(); + if( pFields != 
null) { + for (FieldSchema pField : pFields) { + Optional columnMetadata = toColumnMetadata(pField, typeConverterProvider, typeManager, index, true); + // Ignore unsupported types rather than failing + if (columnMetadata.isPresent()) { + index++; + result.add(columnMetadata.get()); + } + } + } + return result; + } + + public static List toPartitions(SchemaTableName tableName, List partitions) { + return partitions.stream().map(partition -> ConverterUtil.toPartition(tableName, partition)).collect( + Collectors.toList()); + } + + public static Partition toPartition(SchemaTableName tableName, ConnectorPartition connectorPartition) { + Partition result = new Partition(); + ConnectorPartitionDetail connectorPartitionDetail = (ConnectorPartitionDetail) connectorPartition; + result.setValues(Lists.newArrayList( + PartitionUtil.getPartitionKeyValues(connectorPartitionDetail.getPartitionId()).values())); + result.setDbName( tableName.getSchemaName()); + result.setTableName( tableName.getTableName()); + result.setSd(fromStorageInfo(connectorPartitionDetail.getStorageInfo())); + result.setParameters(connectorPartitionDetail.getMetadata()); + AuditInfo auditInfo = connectorPartitionDetail.getAuditInfo(); + if( auditInfo != null){ + Long createdDate = auditInfo.getCreatedDate(); + int currentTime = (int) (System.currentTimeMillis() / 1000); + if( createdDate != null){ + result.setCreateTime( createdDate.intValue()); + } else { + result.setCreateTime(currentTime); + } + Long lastUpdatedDate = auditInfo.getLastUpdatedDate(); + if( lastUpdatedDate != null){ + result.setLastAccessTime( lastUpdatedDate.intValue()); + }else { + result.setLastAccessTime(currentTime); + } + } + return result; + } +} diff --git a/metacat-hive-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin b/metacat-hive-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin new file mode 100644 index 000000000..5afe61493 --- /dev/null +++ b/metacat-hive-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin @@ -0,0 +1 @@ +com.netflix.metacat.hive.connector.MetacatHivePlugin \ No newline at end of file diff --git a/metacat-main/build.gradle b/metacat-main/build.gradle new file mode 100644 index 000000000..d71bcc8c3 --- /dev/null +++ b/metacat-main/build.gradle @@ -0,0 +1,36 @@ +dependencies { + compile project(':metacat-converters') + compile project(':metacat-hive-connector') + compile project(':metacat-mysql-connector') + compile project(':metacat-postgres-connector') + compile project(':metacat-s3-connector') + compile project(':metacat-common-server') + compile project(':metacat-thrift') + testCompile project(':metacat-user-metadata-mysql') + + compile 'org.elasticsearch:elasticsearch:1.7.1' + compile 'com.github.rholder:guava-retrying:2.0.0' + + compile "com.facebook.presto:presto-main:${presto_version}" + compile "com.facebook.presto:presto-hive:${presto_version}" + compile "com.facebook.presto:presto-mysql:${presto_version}" + compile "com.facebook.presto:presto-postgresql:${presto_version}" + compile "com.facebook.presto:presto-cassandra:${presto_version}" + compile "com.facebook.presto:presto-example-http:${presto_version}" + + compile "com.google.inject:guice:${guice_version}" + compile "com.google.inject.extensions:guice-persist:${guice_version}" + compile "com.google.inject.extensions:guice-multibindings:${guice_version}" + compile "com.google.inject.extensions:guice-servlet:${guice_version}" + + testCompile 'io.airlift:testing-mysql-server:0.1' + 
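A note on the next test dependency: project(':metacat-common').sourceSets.test.output places metacat-common's compiled test classes on this module's test classpath, which was the standard Gradle idiom (before java-test-fixtures existed) for reusing another module's test helpers.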
testCompile project(':metacat-common').sourceSets.test.output +} + +test { + systemProperty 'metacat.plugin.config.location', 'src/test/resources/etc/catalog' + systemProperty 'metacat.usermetadata.config.location', 'src/test/resources/usermetadata.properties' + testLogging { + exceptionFormat = 'full' + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/IndexResource.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/IndexResource.java new file mode 100644 index 000000000..6ea5ded8f --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/IndexResource.java @@ -0,0 +1,13 @@ +package com.netflix.metacat.main.api; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.core.Response; + +@Path("/") +public class IndexResource { + @GET + public Response index() { + return Response.ok().build(); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/MetacatV1Resource.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/MetacatV1Resource.java new file mode 100644 index 000000000..aa61ad387 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/MetacatV1Resource.java @@ -0,0 +1,300 @@ +package com.netflix.metacat.main.api; + +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.TableNotFoundException; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.NameDateDto; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.api.MetacatV1; +import com.netflix.metacat.common.dto.CatalogDto; +import com.netflix.metacat.common.dto.CatalogMappingDto; +import com.netflix.metacat.common.dto.CreateCatalogDto; +import com.netflix.metacat.common.dto.DatabaseCreateRequestDto; +import com.netflix.metacat.common.dto.DatabaseDto; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.common.exception.MetacatNotFoundException; +import com.netflix.metacat.common.exception.MetacatNotSupportedException; +import com.netflix.metacat.common.server.events.MetacatCreateDatabasePostEvent; +import com.netflix.metacat.common.server.events.MetacatCreateDatabasePreEvent; +import com.netflix.metacat.common.server.events.MetacatCreateMViewPostEvent; +import com.netflix.metacat.common.server.events.MetacatCreateMViewPreEvent; +import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent; +import com.netflix.metacat.common.server.events.MetacatCreateTablePreEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePreEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteMViewPostEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteMViewPreEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteTablePreEvent; +import com.netflix.metacat.common.server.events.MetacatEventBus; +import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent; +import com.netflix.metacat.common.server.events.MetacatRenameTablePreEvent; +import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePostEvent; +import com.netflix.metacat.common.server.events.MetacatUpdateDatabasePreEvent; +import com.netflix.metacat.common.server.events.MetacatUpdateMViewPostEvent; +import com.netflix.metacat.common.server.events.MetacatUpdateMViewPreEvent; +import 
com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent; +import com.netflix.metacat.common.server.events.MetacatUpdateTablePreEvent; +import com.netflix.metacat.common.util.MetacatContextManager; +import com.netflix.metacat.main.services.CatalogService; +import com.netflix.metacat.main.services.DatabaseService; +import com.netflix.metacat.main.services.MViewService; +import com.netflix.metacat.main.services.TableService; +import com.wordnik.swagger.annotations.ApiParam; + +import javax.inject.Inject; +import java.util.List; +import java.util.Optional; +import java.util.function.Supplier; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.netflix.metacat.main.api.RequestWrapper.qualifyName; +import static com.netflix.metacat.main.api.RequestWrapper.requestWrapper; + +public class MetacatV1Resource implements MetacatV1 { + private final CatalogService catalogService; + private final DatabaseService databaseService; + private final MetacatEventBus eventBus; + private final MViewService mViewService; + private final TableService tableService; + + @Inject + public MetacatV1Resource( + CatalogService catalogService, + DatabaseService databaseService, + MetacatEventBus eventBus, + MViewService mViewService, + TableService tableService) { + this.catalogService = catalogService; + this.databaseService = databaseService; + this.eventBus = eventBus; + this.mViewService = mViewService; + this.tableService = tableService; + } + + @Override + public void createCatalog(CreateCatalogDto createCatalogDto) { + throw new MetacatNotSupportedException(); + } + + @Override + public void createDatabase(String catalogName, String databaseName, + DatabaseCreateRequestDto databaseCreateRequestDto) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofDatabase(catalogName, databaseName)); + requestWrapper(name, "createDatabase", () -> { + eventBus.post(new MetacatCreateDatabasePreEvent(name, metacatContext)); + + databaseService.create(name, databaseCreateRequestDto); + + DatabaseDto dto = databaseService.get(name, databaseCreateRequestDto.getDefinitionMetadata() != null); + eventBus.post(new MetacatCreateDatabasePostEvent(dto, metacatContext)); + return null; + }); + } + + @Override + public TableDto createMView(String catalogName, + String databaseName, + String tableName, + String viewName, + Boolean snapshot, + String filter) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)); + return requestWrapper(name, "createMView", () -> { + eventBus.post(new MetacatCreateMViewPreEvent(name, snapshot, filter, metacatContext)); + + TableDto dto = mViewService.create(name); + if (snapshot != null && snapshot) { + mViewService.snapshotPartitions(name, filter); + } + + eventBus.post(new MetacatCreateMViewPostEvent(dto, snapshot, filter, metacatContext)); + return dto; + }); + } + + @Override + public TableDto createTable(String catalogName, String databaseName, String tableName, TableDto table) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + return requestWrapper(name, "createTable", () -> { + checkArgument(table != null, "Table cannot be null"); + checkArgument(tableName != null && !tableName.isEmpty(), "table name is required"); + 
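            // the name embedded in the table DTO must match the table name from the path
+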
checkArgument(table.getName() != null && tableName.equals(table.getName().getTableName()),
+                    "Table name does not match the name in the table");
+
+            eventBus.post(new MetacatCreateTablePreEvent(name, metacatContext));
+
+            tableService.create(name, table);
+
+            TableDto dto = tableService.get(name, true).orElseThrow(() -> new IllegalStateException("Should exist"));
+            eventBus.post(new MetacatCreateTablePostEvent(dto, metacatContext));
+            return dto;
+        });
+    }
+
+    @Override
+    public void deleteDatabase(String catalogName, String databaseName) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName name = qualifyName(() -> QualifiedName.ofDatabase(catalogName, databaseName));
+        requestWrapper(name, "deleteDatabase", () -> {
+            DatabaseDto dto = databaseService.get(name, true);
+            eventBus.post(new MetacatDeleteDatabasePreEvent(dto, metacatContext));
+
+            databaseService.delete(name);
+
+            eventBus.post(new MetacatDeleteDatabasePostEvent(dto, metacatContext));
+            return null;
+        });
+    }
+
+    @Override
+    public TableDto deleteMView(String catalogName, String databaseName, String tableName, String viewName) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName));
+        return requestWrapper(name, "deleteMView", () -> {
+            eventBus.post(new MetacatDeleteMViewPreEvent(name, metacatContext));
+
+            TableDto dto = mViewService.delete(name);
+
+            eventBus.post(new MetacatDeleteMViewPostEvent(dto, metacatContext));
+            return dto;
+        });
+    }
+
+    @Override
+    public TableDto deleteTable(String catalogName, String databaseName, String tableName) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName));
+        return requestWrapper(name, "deleteTable", () -> {
+            eventBus.post(new MetacatDeleteTablePreEvent(name, metacatContext));
+
+            TableDto dto = tableService.delete(name);
+
+            eventBus.post(new MetacatDeleteTablePostEvent(dto, metacatContext));
+            return dto;
+        });
+    }
+
+    @Override
+    public CatalogDto getCatalog(String catalogName) {
+        QualifiedName name = qualifyName(() -> QualifiedName.ofCatalog(catalogName));
+        return requestWrapper(name, "getCatalog", () -> catalogService.get(name));
+    }
+
+    @Override
+    public List<CatalogMappingDto> getCatalogNames() {
+        QualifiedName name = QualifiedName.ofCatalog("getCatalogNames");
+        return requestWrapper(name, "getCatalogNames", catalogService::getCatalogNames);
+    }
+
+    @Override
+    public DatabaseDto getDatabase(String catalogName, String databaseName, Boolean includeUserMetadata) {
+        QualifiedName name = qualifyName(() -> QualifiedName.ofDatabase(catalogName, databaseName));
+        return requestWrapper(name, "getDatabase", () -> databaseService.get(name, includeUserMetadata));
+    }
+
+    @Override
+    public TableDto getMView(String catalogName, String databaseName, String tableName, String viewName) {
+        QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName));
+        return requestWrapper(name, "getMView", (Supplier<TableDto>) () -> {
+            Optional<TableDto> table = mViewService.get(name);
+            return table.orElseThrow(() -> new MetacatNotFoundException("Unable to find view: " + name));
+        });
+    }
+
+    @Override
+    public List<NameDateDto> getMViews(String catalogName) {
+        QualifiedName name = qualifyName(() -> QualifiedName.ofCatalog(catalogName));
+        return requestWrapper(name, "getMViews", () -> mViewService.list(name));
+    }
+
+    @Override
+    public List<NameDateDto> getMViews(String catalogName, String databaseName, String tableName) {
+        QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName));
+        return requestWrapper(name, "getMViews", () -> mViewService.list(name));
+    }
+
+    @Override
+    public TableDto getTable(String catalogName, String databaseName, String tableName, Boolean includeInfo, Boolean includeDefinitionMetadata, Boolean includeDataMetadata) {
+        QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName));
+        return requestWrapper(name, "getTable", (Supplier<TableDto>) () -> {
+            Optional<TableDto> table = tableService.get(name, includeInfo, includeDefinitionMetadata, includeDataMetadata);
+            return table.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
+        });
+    }
+
+    @Override
+    public void renameTable(String catalogName, String databaseName, String tableName, String newTableName) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName oldName = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName));
+        QualifiedName newName = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, newTableName));
+        requestWrapper(oldName, "renameTable", () -> {
+            eventBus.post(new MetacatRenameTablePreEvent(newName, oldName, metacatContext));
+
+            tableService.rename(oldName, newName, false);
+
+            TableDto dto = tableService.get(newName, true).orElseThrow(() -> new IllegalStateException("should exist"));
+            eventBus.post(new MetacatRenameTablePostEvent(oldName, dto, metacatContext));
+            return null;
+        });
+    }
+
+    @Override
+    public void updateCatalog(String catalogName, CreateCatalogDto createCatalogDto) {
+        throw new MetacatNotSupportedException();
+    }
+
+    @Override
+    public void updateDatabase(
+            String catalogName,
+            String databaseName,
+            DatabaseCreateRequestDto databaseUpdateRequestDto) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName name = qualifyName(() -> QualifiedName.ofDatabase(catalogName, databaseName));
+        requestWrapper(name, "updateDatabase", () -> {
+            eventBus.post(new MetacatUpdateDatabasePreEvent(name, metacatContext));
+
+            databaseService.update(name, databaseUpdateRequestDto);
+
+            eventBus.post(new MetacatUpdateDatabasePostEvent(name, metacatContext));
+            return null;
+        });
+    }
+
+    @Override
+    public TableDto updateMView(String catalogName, String databaseName, String tableName, String viewName, TableDto table) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName));
+        return requestWrapper(name, "updateMView", () -> {
+            eventBus.post(new MetacatUpdateMViewPreEvent(name, table, metacatContext));
+
+            mViewService.update(name, table);
+
+            TableDto dto = mViewService.get(name).orElseThrow(() -> new IllegalStateException("should exist"));
+            eventBus.post(new MetacatUpdateMViewPostEvent(dto, metacatContext));
+            return dto;
+        });
+    }
+
+    @Override
+    public TableDto updateTable(String catalogName, String databaseName, String tableName, TableDto table) {
+        MetacatContext metacatContext = MetacatContextManager.getContext();
+        QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName));
+        return requestWrapper(name, "updateTable", () -> {
+            checkArgument(table != null, "Table cannot be null");
+            checkArgument(tableName != null && !tableName.isEmpty(), "table name is required");
+            checkArgument(table.getName() != null && tableName.equals(table.getName().getTableName()),
+                    "Table name does not match the name in the table");
+
+            eventBus.post(new MetacatUpdateTablePreEvent(name, table, metacatContext));
+
+            tableService.update(name, table);
+
+            TableDto dto = tableService.get(name, true).orElseThrow(() -> new IllegalStateException("should exist"));
+            eventBus.post(new MetacatUpdateTablePostEvent(dto, metacatContext));
+            return dto;
+        });
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/MetadataV1Resource.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/MetadataV1Resource.java
new file mode 100644
index 000000000..4c07d7b64
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/MetadataV1Resource.java
@@ -0,0 +1,68 @@
+package com.netflix.metacat.main.api;
+
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.api.MetadataV1;
+import com.netflix.metacat.common.dto.DataMetadataDto;
+import com.netflix.metacat.common.dto.DataMetadataGetRequestDto;
+import com.netflix.metacat.common.dto.DefinitionMetadataDto;
+import com.netflix.metacat.common.dto.SortOrder;
+import com.netflix.metacat.common.usermetadata.UserMetadataService;
+
+import javax.inject.Inject;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+
+import static com.netflix.metacat.main.api.RequestWrapper.requestWrapper;
+
+/**
+ * Created by amajumdar on 6/28/15.
+ */
+public class MetadataV1Resource implements MetadataV1 {
+    private final UserMetadataService userMetadataService;
+
+    @Inject
+    public MetadataV1Resource(UserMetadataService userMetadataService) {
+        this.userMetadataService = userMetadataService;
+    }
+
+    @Override
+    public DataMetadataDto getDataMetadata(DataMetadataGetRequestDto metadataGetRequestDto) {
+        return requestWrapper("getDataMetadata", () -> {
+            DataMetadataDto result = null;
+            if (metadataGetRequestDto.getUri() != null) {
+                Optional<ObjectNode> o = userMetadataService.getDataMetadata(metadataGetRequestDto.getUri());
+                if (o.isPresent()) {
+                    result = new DataMetadataDto();
+                    result.setDataMetadata(o.get());
+                    result.setUri(metadataGetRequestDto.getUri());
+                }
+            }
+            return result;
+        });
+    }
+
+    @Override
+    public List<DefinitionMetadataDto> getDefinitionMetadataList(
+            String sortBy,
+            SortOrder sortOrder,
+            Integer offset,
+            Integer limit,
+            Boolean lifetime,
+            String type,
+            String name,
+            Set<String> propertyNames) {
+        // guard against a null Boolean before unboxing
+        if (lifetime != null && lifetime) {
+            propertyNames.add("lifetime");
+        }
+        return requestWrapper("getDefinitionMetadataList",
+                () -> userMetadataService.searchDefinitionMetadatas(propertyNames, type, name, sortBy, sortOrder != null ? sortOrder.name() : null, offset, limit));
+    }
+
+    @Override
+    public List<DefinitionMetadataDto> searchByOwners(Set<String> owners) {
+        return requestWrapper("searchByOwners", () -> userMetadataService.searchByOwners(owners));
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/PartitionV1Resource.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/PartitionV1Resource.java
new file mode 100644
index 000000000..f3cf391fd
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/PartitionV1Resource.java
@@ -0,0 +1,497 @@
+package com.netflix.metacat.main.api;
+
+import com.facebook.presto.spi.Pageable;
+import com.facebook.presto.spi.Sort;
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.api.MetacatV1;
+import
com.netflix.metacat.common.api.PartitionV1; +import com.netflix.metacat.common.dto.GetPartitionsRequestDto; +import com.netflix.metacat.common.dto.PartitionDto; +import com.netflix.metacat.common.dto.PartitionsSaveRequestDto; +import com.netflix.metacat.common.dto.PartitionsSaveResponseDto; +import com.netflix.metacat.common.dto.SortOrder; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPostEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPreEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent; +import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPreEvent; +import com.netflix.metacat.common.server.events.MetacatEventBus; +import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPostEvent; +import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPreEvent; +import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent; +import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPreEvent; +import com.netflix.metacat.common.util.MetacatContextManager; +import com.netflix.metacat.main.services.MViewService; +import com.netflix.metacat.main.services.PartitionService; + +import javax.inject.Inject; +import java.util.List; +import java.util.stream.Collectors; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.netflix.metacat.main.api.RequestWrapper.qualifyName; +import static com.netflix.metacat.main.api.RequestWrapper.requestWrapper; + +public class PartitionV1Resource implements PartitionV1 { + private final MViewService mViewService; + private final MetacatEventBus eventBus; + private final MetacatV1 v1; + private final PartitionService partitionService; + + @Inject + public PartitionV1Resource( + MetacatEventBus eventBus, + MetacatV1 v1, + MViewService mViewService, + PartitionService partitionService) { + this.eventBus = eventBus; + this.v1 = v1; + this.mViewService = mViewService; + this.partitionService = partitionService; + } + + @Override + public void deletePartitions(String catalogName, String databaseName, String tableName, List partitionIds) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + requestWrapper(name, "deleteTablePartition", () -> { + if (partitionIds == null || partitionIds.isEmpty()) { + throw new IllegalArgumentException("partitionIds are required"); + } + + eventBus.post(new MetacatDeleteTablePartitionPreEvent(name, partitionIds, metacatContext)); + + partitionService.delete(name, partitionIds); + + eventBus.post(new MetacatDeleteTablePartitionPostEvent(name, partitionIds, metacatContext)); + return null; + }); + } + + @Override + public void deletePartitions( + String catalogName, + String databaseName, + String tableName, + String viewName, + List partitionIds) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)); + requestWrapper(name, "deleteMViewPartition", () -> { + if (partitionIds == null || partitionIds.isEmpty()) { + throw new IllegalArgumentException("partitionIds are required"); + } + + eventBus.post(new MetacatDeleteMViewPartitionPreEvent(name, partitionIds, metacatContext)); + + mViewService.deletePartitions(name, partitionIds); + 
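+            // fire the post-event only after the partitions have been removed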
+ eventBus.post(new MetacatDeleteMViewPartitionPostEvent(name, partitionIds, metacatContext)); + return null; + }); + } + + @Override + public Integer getPartitionCount( + String catalogName, + String databaseName, + String tableName) { + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + return requestWrapper(name, "getPartitionCount", () -> partitionService.count(name)); + } + + @Override + public Integer getPartitionCount( + String catalogName, + String databaseName, + String tableName, + String viewName) { + QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)); + return requestWrapper(name, "getPartitionCount", () -> mViewService.partitionCount(name)); + } + + @Override + public List getPartitions( + String catalogName, + String databaseName, + String tableName, + String filter, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata) { + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + return requestWrapper(name, "getPartitions", () -> { + com.facebook.presto.spi.SortOrder order = null; + if (sortOrder != null) { + order = com.facebook.presto.spi.SortOrder.valueOf(sortOrder.name()); + } + return partitionService.list( + name, + filter, + null, + new Sort(sortBy, order), + new Pageable(limit, offset), + includeUserMetadata, + includeUserMetadata, + false + ); + }); + } + + private List getPartitions( + String catalogName, + String databaseName, + String tableName, + String filter, + List partitionNames, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + Boolean includePartitionDetails) { + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + return requestWrapper(name, "getPartitions", () -> { + com.facebook.presto.spi.SortOrder order = null; + if (sortOrder != null) { + order = com.facebook.presto.spi.SortOrder.valueOf(sortOrder.name()); + } + return partitionService.list( + name, + filter, + partitionNames, + new Sort(sortBy, order), + new Pageable(limit, offset), + includeUserMetadata, + includeUserMetadata, + includePartitionDetails + ); + }); + } + + @Override + public List getPartitions( + String catalogName, + String databaseName, + String tableName, + String viewName, + String filter, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata) { + QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)); + return requestWrapper(name, "getPartitions", () -> { + com.facebook.presto.spi.SortOrder order = null; + if (sortOrder != null) { + order = com.facebook.presto.spi.SortOrder.valueOf(sortOrder.name()); + } + return mViewService.listPartitions( + name, + filter, + null, + new Sort(sortBy, order), + new Pageable(limit, offset), + includeUserMetadata, + false + ); + }); + } + + private List getPartitions( + String catalogName, + String databaseName, + String tableName, + String viewName, + String filter, + List partitionNames, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + Boolean includePartitionDetails) { + QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)); + return requestWrapper(name, "getPartitions", () -> { + com.facebook.presto.spi.SortOrder order 
= null; + if (sortOrder != null) { + order = com.facebook.presto.spi.SortOrder.valueOf(sortOrder.name()); + } + return mViewService.listPartitions( + name, + filter, + partitionNames, + new Sort(sortBy, order), + new Pageable(limit, offset), + includeUserMetadata, + includePartitionDetails + ); + }); + } + + @Override + public List getPartitionKeysForRequest( + String catalogName, + String databaseName, + String tableName, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + GetPartitionsRequestDto getPartitionsRequestDto) { + List result = getPartitionsForRequest(catalogName, databaseName, tableName, sortBy, sortOrder, + offset, limit, includeUserMetadata, getPartitionsRequestDto); + return result.stream().map(partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList()); + } + + @Override + public List getPartitionUrisForRequest( + String catalogName, + String databaseName, + String tableName, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + GetPartitionsRequestDto getPartitionsRequestDto) { + List result = getPartitionsForRequest(catalogName, databaseName, tableName, sortBy, sortOrder, + offset, limit, includeUserMetadata, getPartitionsRequestDto); + return result.stream().map(PartitionDto::getDataUri).collect(Collectors.toList()); + } + + @Override + public List getPartitionKeys( + String catalogName, + String databaseName, + String tableName, + String filter, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata) { + List result = getPartitions(catalogName, databaseName, tableName, filter, sortBy, sortOrder, + offset, limit, includeUserMetadata); + return result.stream().map(partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList()); + } + + @Override + public List getPartitionKeys( + String catalogName, + String databaseName, + String tableName, + String viewName, + String filter, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata) { + List result = getPartitions(catalogName, databaseName, tableName, viewName, filter, sortBy, + sortOrder, offset, limit, includeUserMetadata); + return result.stream().map(partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList()); + } + + @Override + public List getPartitionsForRequest( + String catalogName, + String databaseName, + String tableName, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + GetPartitionsRequestDto getPartitionsRequestDto) { + String filterExpression = null; + List partitionNames = null; + Boolean includePartitionDetails = false; + if( getPartitionsRequestDto != null){ + filterExpression = getPartitionsRequestDto.getFilter(); + partitionNames = getPartitionsRequestDto.getPartitionNames(); + includePartitionDetails = getPartitionsRequestDto.getIncludePartitionDetails(); + } + return getPartitions(catalogName, databaseName, tableName, filterExpression, partitionNames, sortBy, sortOrder, offset, limit, + includeUserMetadata, includePartitionDetails); + } + + @Override + public List getPartitionKeysForRequest( + String catalogName, + String databaseName, + String tableName, + String viewName, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + GetPartitionsRequestDto getPartitionsRequestDto) { + List result = 
getPartitionsForRequest(catalogName, databaseName, tableName, viewName, sortBy, + sortOrder, offset, limit, includeUserMetadata, getPartitionsRequestDto); + return result.stream().map(partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList()); + } + + @Override + public List getPartitionUris( + String catalogName, + String databaseName, + String tableName, + String filter, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata) { + List result = getPartitions(catalogName, databaseName, tableName, filter, sortBy, sortOrder, + offset, limit, includeUserMetadata); + return result.stream().map(PartitionDto::getDataUri).collect(Collectors.toList()); + } + + @Override + public List getPartitionUris( + String catalogName, + String databaseName, + String tableName, + String viewName, + String filter, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata) { + List result = getPartitions(catalogName, databaseName, tableName, viewName, filter, sortBy, + sortOrder, offset, limit, includeUserMetadata); + return result.stream().map(PartitionDto::getDataUri).collect(Collectors.toList()); + } + + @Override + public List getPartitionsForRequest( + String catalogName, + String databaseName, + String tableName, + String viewName, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + GetPartitionsRequestDto getPartitionsRequestDto) { + String filterExpression = null; + List partitionNames = null; + Boolean includePartitionDetails = false; + if( getPartitionsRequestDto != null){ + filterExpression = getPartitionsRequestDto.getFilter(); + partitionNames = getPartitionsRequestDto.getPartitionNames(); + includePartitionDetails = getPartitionsRequestDto.getIncludePartitionDetails(); + } + return getPartitions(catalogName, databaseName, tableName, viewName, filterExpression, partitionNames, sortBy, sortOrder, + offset, limit, includeUserMetadata, includePartitionDetails); + } + + @Override + public List getPartitionUrisForRequest( + String catalogName, + String databaseName, + String tableName, + String viewName, + String sortBy, + SortOrder sortOrder, + Integer offset, + Integer limit, + Boolean includeUserMetadata, + GetPartitionsRequestDto getPartitionsRequestDto) { + List result = getPartitionsForRequest(catalogName, databaseName, tableName, viewName, sortBy, + sortOrder, offset, limit, includeUserMetadata, getPartitionsRequestDto); + return result.stream().map(PartitionDto::getDataUri).collect(Collectors.toList()); + } + + @Override + public PartitionsSaveResponseDto savePartitions( + String catalogName, + String databaseName, + String tableName, + PartitionsSaveRequestDto partitionsSaveRequestDto) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = QualifiedName.ofTable(catalogName, databaseName, tableName); + return requestWrapper(name, "saveTablePartition", () -> { + checkArgument(partitionsSaveRequestDto != null && + partitionsSaveRequestDto.getPartitions() != null && + !partitionsSaveRequestDto.getPartitions().isEmpty(), + "Partitions must be present"); + + List partitionsToSave = partitionsSaveRequestDto.getPartitions(); + boolean checkIfExists = partitionsSaveRequestDto.getCheckIfExists() == null?true:partitionsSaveRequestDto.getCheckIfExists(); + eventBus.post(new MetacatSaveTablePartitionPreEvent(name, partitionsToSave, metacatContext)); + List partitionIdsForDeletes = 
partitionsSaveRequestDto.getPartitionIdsForDeletes(); + if( partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()){ + eventBus.post(new MetacatDeleteTablePartitionPreEvent(name, partitionIdsForDeletes, metacatContext)); + } + + PartitionsSaveResponseDto result = partitionService.save(name, partitionsToSave, partitionIdsForDeletes, checkIfExists); + + // This metadata is actually for the table, if it is present update that + if (partitionsSaveRequestDto.getDefinitionMetadata() != null + || partitionsSaveRequestDto.getDataMetadata() != null) { + TableDto dto = v1.getTable(catalogName, databaseName, tableName, true, false, false); + dto.setDefinitionMetadata(partitionsSaveRequestDto.getDefinitionMetadata()); + dto.setDataMetadata(partitionsSaveRequestDto.getDataMetadata()); + v1.updateTable(catalogName, databaseName, tableName, dto); + } + + eventBus.post(new MetacatSaveTablePartitionPostEvent(name, partitionsToSave, metacatContext)); + if( partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()){ + eventBus.post(new MetacatDeleteTablePartitionPostEvent(name, partitionIdsForDeletes, metacatContext)); + } + return result; + }); + } + + @Override + public PartitionsSaveResponseDto savePartitions( + String catalogName, + String databaseName, + String tableName, + String viewName, + PartitionsSaveRequestDto partitionsSaveRequestDto) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofView(catalogName, databaseName, tableName, viewName)); + return requestWrapper(name, "saveMViewPartition", () -> { + checkArgument(partitionsSaveRequestDto != null && + partitionsSaveRequestDto.getPartitions() != null && + !partitionsSaveRequestDto.getPartitions().isEmpty(), + "Partitions must be present"); + + List partitionsToSave = partitionsSaveRequestDto.getPartitions(); + boolean checkIfExists = partitionsSaveRequestDto.getCheckIfExists() == null?true:partitionsSaveRequestDto.getCheckIfExists(); + eventBus.post(new MetacatSaveMViewPartitionPreEvent(name, partitionsToSave, metacatContext)); + List partitionIdsForDeletes = partitionsSaveRequestDto.getPartitionIdsForDeletes(); + if( partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()){ + eventBus.post(new MetacatDeleteMViewPartitionPreEvent(name, partitionIdsForDeletes, metacatContext)); + } + + PartitionsSaveResponseDto result = mViewService.savePartitions(name, partitionsToSave, partitionIdsForDeletes, true, checkIfExists); + + // This metadata is actually for the view, if it is present update that + if (partitionsSaveRequestDto.getDefinitionMetadata() != null + || partitionsSaveRequestDto.getDataMetadata() != null) { + TableDto dto = v1.getMView(catalogName, databaseName, tableName, viewName); + dto.setDefinitionMetadata(partitionsSaveRequestDto.getDefinitionMetadata()); + dto.setDataMetadata(partitionsSaveRequestDto.getDataMetadata()); + v1.updateMView(catalogName, databaseName, tableName, viewName, dto); + } + + eventBus.post(new MetacatSaveMViewPartitionPostEvent(name, partitionsToSave, metacatContext)); + if( partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()){ + eventBus.post(new MetacatDeleteMViewPartitionPostEvent(name, partitionIdsForDeletes, metacatContext)); + } + return result; + }); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/RequestWrapper.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/RequestWrapper.java new file mode 100644 index 000000000..8c77ce080 --- /dev/null 
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/RequestWrapper.java
@@ -0,0 +1,105 @@
+package com.netflix.metacat.main.api;
+
+import com.facebook.presto.exception.InvalidMetaException;
+import com.facebook.presto.exception.PartitionAlreadyExistsException;
+import com.facebook.presto.exception.SchemaAlreadyExistsException;
+import com.facebook.presto.hive.TableAlreadyExistsException;
+import com.facebook.presto.spi.NotFoundException;
+import com.facebook.presto.spi.PrestoException;
+import com.facebook.presto.spi.StandardErrorCode;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.exception.MetacatAlreadyExistsException;
+import com.netflix.metacat.common.exception.MetacatBadRequestException;
+import com.netflix.metacat.common.exception.MetacatException;
+import com.netflix.metacat.common.exception.MetacatNotFoundException;
+import com.netflix.metacat.common.exception.MetacatNotSupportedException;
+import com.netflix.metacat.common.exception.MetacatUserMetadataException;
+import com.netflix.metacat.common.monitoring.CounterWrapper;
+import com.netflix.metacat.common.monitoring.TimerWrapper;
+import com.netflix.metacat.common.usermetadata.UserMetadataServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.core.Response;
+import java.util.function.Supplier;
+
+public class RequestWrapper {
+    private static final Logger log = LoggerFactory.getLogger(RequestWrapper.class);
+
+    public static QualifiedName qualifyName(Supplier<QualifiedName> nameSupplier) {
+        try {
+            return nameSupplier.get();
+        } catch (Exception e) {
+            log.error("Invalid qualified name", e);
+            throw new MetacatBadRequestException(e.getMessage());
+        }
+    }
+
+    public static <R> R requestWrapper(
+            QualifiedName name,
+            String resourceRequestName,
+            Supplier<R> supplier) {
+        TimerWrapper timer = TimerWrapper.createStarted("dse.metacat.timer." + resourceRequestName);
+        CounterWrapper.incrementCounter("dse.metacat.counter." + resourceRequestName);
+        try {
+            log.info("### Calling method: {} for {}", resourceRequestName, name);
+            return supplier.get();
+        } catch (UnsupportedOperationException e) {
+            log.error(e.getMessage(), e);
+            throw new MetacatNotSupportedException("Catalog does not support the operation");
+        } catch (SchemaAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
+            log.error(e.getMessage(), e);
+            throw new MetacatAlreadyExistsException(e.getMessage());
+        } catch (NotFoundException | MetacatNotFoundException e) {
+            log.error(e.getMessage(), e);
+            throw new MetacatNotFoundException("Unable to locate: " + name);
+        } catch (InvalidMetaException | IllegalArgumentException e) {
+            log.error(e.getMessage(), e);
+            throw new MetacatBadRequestException(String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
+        } catch (PrestoException e) {
+            String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
+            log.error(message, e);
+            if (e.getErrorCode() == StandardErrorCode.NOT_SUPPORTED.toErrorCode()) {
+                throw new MetacatNotSupportedException("Catalog does not support the operation");
+            } else {
+                CounterWrapper.incrementCounter("dse.metacat.counter.failure." + resourceRequestName);
+                throw new MetacatException(message, Response.Status.INTERNAL_SERVER_ERROR, e);
+            }
+        } catch (UserMetadataServiceException e) {
+            String message = String.format("%s.%s -- %s usermetadata operation failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
+            log.error(message, e);
+            throw new MetacatUserMetadataException(message);
+        } catch (Exception e) {
+            CounterWrapper.incrementCounter("dse.metacat.counter.failure." + resourceRequestName);
+            String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
+            log.error(message, e);
+            throw new MetacatException(message, Response.Status.INTERNAL_SERVER_ERROR, e);
+        } finally {
+            log.info("### Time taken to complete {} is {} ms", resourceRequestName, timer.stop());
+        }
+    }
+
+    public static <R> R requestWrapper(
+            String resourceRequestName,
+            Supplier<R> supplier) {
+        TimerWrapper timer = TimerWrapper.createStarted("dse.metacat.timer." + resourceRequestName);
+        CounterWrapper.incrementCounter("dse.metacat.counter." + resourceRequestName);
+        try {
+            log.info("### Calling method: {}", resourceRequestName);
+            return supplier.get();
+        } catch (UnsupportedOperationException e) {
+            log.error(e.getMessage(), e);
+            throw new MetacatNotSupportedException("Catalog does not support the operation");
+        } catch (IllegalArgumentException e) {
+            log.error(e.getMessage(), e);
+            throw new MetacatBadRequestException(String.format("%s.%s", e.getMessage(),
+                    e.getCause() == null ? "" : e.getCause().getMessage()));
+        } catch (Exception e) {
+            CounterWrapper.incrementCounter("dse.metacat.counter.failure." + resourceRequestName);
+            String message = String.format("%s.%s -- %s failed.", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName);
+            log.error(message, e);
+            throw new MetacatException(message, Response.Status.INTERNAL_SERVER_ERROR, e);
+        } finally {
+            log.info("### Time taken to complete {} is {} ms", resourceRequestName, timer.stop());
+        }
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/SearchMetacatV1Resource.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/SearchMetacatV1Resource.java
new file mode 100644
index 000000000..fb73c8f55
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/SearchMetacatV1Resource.java
@@ -0,0 +1,24 @@
+package com.netflix.metacat.main.api;
+
+import com.netflix.metacat.common.api.SearchMetacatV1;
+import com.netflix.metacat.common.dto.TableDto;
+import com.netflix.metacat.main.services.search.ElasticSearchUtil;
+
+import javax.inject.Inject;
+import java.util.List;
+
+/**
+ * Created by amajumdar on 12/17/15.
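+ * REST adapter that delegates free-text table search to Elasticsearch.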
+ */ +public class SearchMetacatV1Resource implements SearchMetacatV1 { + ElasticSearchUtil elasticSearchUtil; + + @Inject + public SearchMetacatV1Resource(ElasticSearchUtil elasticSearchUtil) { + this.elasticSearchUtil = elasticSearchUtil; + } + @Override + public List searchTables(String searchString) { + return elasticSearchUtil.simpleSearch(searchString); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/api/TagV1Resource.java b/metacat-main/src/main/java/com/netflix/metacat/main/api/TagV1Resource.java new file mode 100644 index 000000000..0aa1b6df3 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/api/TagV1Resource.java @@ -0,0 +1,86 @@ +package com.netflix.metacat.main.api; + +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.api.TagV1; +import com.netflix.metacat.common.server.events.MetacatEventBus; +import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent; +import com.netflix.metacat.common.usermetadata.TagService; +import com.netflix.metacat.common.util.MetacatContextManager; + +import javax.inject.Inject; +import java.util.List; +import java.util.Set; + +import static com.netflix.metacat.main.api.RequestWrapper.qualifyName; +import static com.netflix.metacat.main.api.RequestWrapper.requestWrapper; + +/** + * Created by amajumdar on 6/28/15. + */ +public class TagV1Resource implements TagV1{ + TagService tagService; + MetacatEventBus eventBus; + @Inject + public TagV1Resource( MetacatEventBus eventBus, TagService tagService) { + this.tagService = tagService; + this.eventBus = eventBus; + } + + @Override + public Set getTags() { + return requestWrapper("TagV1Resource.getTags" , tagService::getTags); + } + + @Override + public List list( + Set includeTags, + Set excludeTags, + String sourceName, + String databaseName, + String tableName) { + return requestWrapper("TagV1Resource.list" , () -> tagService.list( includeTags, excludeTags, sourceName, databaseName, tableName)); + } + + @Override + public List search( + String tag, + String sourceName, + String databaseName, + String tableName) { + return requestWrapper("TagV1Resource.search" , () -> tagService.search( tag, sourceName, databaseName, tableName)); + } + + @Override + public Set setTableTags( + String catalogName, + String databaseName, + String tableName, + Set tags) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + return requestWrapper("TagV1Resource.setTableTags" , () -> { + Set result = tagService.setTableTags( name, tags, true); + + eventBus.post(new MetacatUpdateTablePostEvent(name, metacatContext)); + return result; + }); + } + + @Override + public void removeTableTags( + String catalogName, + String databaseName, + String tableName, + Boolean deleteAll, + Set tags) { + MetacatContext metacatContext = MetacatContextManager.getContext(); + QualifiedName name = qualifyName(() -> QualifiedName.ofTable(catalogName, databaseName, tableName)); + requestWrapper("TagV1Resource.removeTableTags" , () -> { + tagService.removeTableTags( name, deleteAll, tags, true); + + eventBus.post(new MetacatUpdateTablePostEvent(name, metacatContext)); + return null; + }); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/connector/MetacatConnectorManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/connector/MetacatConnectorManager.java 
new file mode 100644 index 000000000..91405c4b8 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/connector/MetacatConnectorManager.java @@ -0,0 +1,80 @@ +package com.netflix.metacat.main.connector; + +import com.facebook.presto.exception.CatalogNotFoundException; +import com.facebook.presto.security.AccessControlManager; +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.NodeManager; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.main.presto.connector.ConnectorManager; +import com.netflix.metacat.main.presto.index.IndexManager; +import com.netflix.metacat.main.presto.metadata.HandleResolver; +import com.netflix.metacat.main.presto.metadata.MetadataManager; +import com.netflix.metacat.main.presto.split.PageSinkManager; +import com.netflix.metacat.main.presto.split.PageSourceManager; +import com.netflix.metacat.main.presto.split.SplitManager; +import com.netflix.metacat.main.spi.MetacatCatalogConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import javax.inject.Inject; +import javax.inject.Singleton; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import static com.netflix.metacat.main.spi.MetacatCatalogConfig.createFromMapAndRemoveProperties; + +@Singleton +public class MetacatConnectorManager extends ConnectorManager { + private static final Logger log = LoggerFactory.getLogger(MetacatConnectorManager.class); + private final ConcurrentHashMap catalogs = new ConcurrentHashMap<>(); + + @Inject + public MetacatConnectorManager(MetadataManager metadataManager, + AccessControlManager accessControlManager, + SplitManager splitManager, + PageSourceManager pageSourceManager, + IndexManager indexManager, + PageSinkManager pageSinkManager, + HandleResolver handleResolver, + Map connectorFactories, + NodeManager nodeManager) { + super(metadataManager, accessControlManager, splitManager, pageSourceManager, indexManager, pageSinkManager, handleResolver, + connectorFactories, nodeManager); + } + + @Override + public synchronized void createConnection( + String catalogName, ConnectorFactory connectorFactory, Map properties) { + properties = Maps.newHashMap(properties); + MetacatCatalogConfig config = createFromMapAndRemoveProperties(connectorFactory.getName(), properties); + + super.createConnection(catalogName, connectorFactory, properties); + + catalogs.put(catalogName, config); + } + + @Nonnull + public MetacatCatalogConfig getCatalogConfig(QualifiedName name) { + return getCatalogConfig(name.getCatalogName()); + } + + @Nonnull + public MetacatCatalogConfig getCatalogConfig(String catalogName) { + if (Strings.isNullOrEmpty(catalogName)) { + throw new IllegalArgumentException("catalog-name is required"); + } + if (!catalogs.containsKey(catalogName)) { + throw new CatalogNotFoundException(catalogName); + } + return catalogs.get(catalogName); + } + + @Nonnull + public Map getCatalogs() { + return ImmutableMap.copyOf(catalogs); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatInitializationService.java b/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatInitializationService.java new file mode 100644 index 000000000..a952c98ec --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatInitializationService.java @@ -0,0 +1,95 @@ +package 
com.netflix.metacat.main.init;
+
+import com.facebook.presto.metadata.CatalogManagerConfig;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableMap;
+import com.google.inject.Injector;
+import com.google.inject.Provider;
+import com.google.inject.spi.ProviderInstanceBinding;
+import com.netflix.metacat.common.server.Config;
+import com.netflix.metacat.common.server.events.MetacatEventBus;
+import com.netflix.metacat.common.usermetadata.UserMetadataService;
+import com.netflix.metacat.main.manager.PluginManager;
+import com.netflix.metacat.main.presto.metadata.CatalogManager;
+import com.netflix.metacat.main.services.search.MetacatEventHandlers;
+import io.airlift.configuration.ConfigurationFactory;
+import io.airlift.configuration.ConfigurationProvider;
+import org.elasticsearch.client.Client;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+public class MetacatInitializationService {
+    private static final Logger log = LoggerFactory.getLogger(MetacatInitializationService.class);
+    private final Config config;
+    private final ExecutorService eventExecutor;
+    private final Injector injector;
+
+    @Inject
+    public MetacatInitializationService(Injector injector, Config config) {
+        this.config = config;
+        this.eventExecutor = Executors.newFixedThreadPool(config.getEventBusExecutorThreadCount());
+        this.injector = injector;
+    }
+
+    public ConfigurationFactory getConfigurationFactory() {
+        String pluginConfigDir = config.getPluginConfigLocation();
+        checkArgument(!Strings.isNullOrEmpty(pluginConfigDir),
+                "Missing required property metacat.plugin.config.location");
+        log.info("Loading catalogs from directory '{}'", pluginConfigDir);
+
+        Map<String, String> properties = ImmutableMap.of("plugin.config-dir", pluginConfigDir);
+        return new ConfigurationFactory(properties);
+    }
+
+    public void start() throws Exception {
+        ConfigurationFactory configurationFactory = getConfigurationFactory();
+        ProviderInstanceBinding<CatalogManagerConfig> providerInstanceBinding =
+                (ProviderInstanceBinding<CatalogManagerConfig>) injector.getBinding(CatalogManagerConfig.class);
+        Provider<? extends CatalogManagerConfig> provider = providerInstanceBinding.getProviderInstance();
+        ((ConfigurationProvider<CatalogManagerConfig>) provider).setConfigurationFactory(configurationFactory);
+        injector.getInstance(PluginManager.class).loadPlugins();
+        injector.getInstance(CatalogManager.class).loadCatalogs();
+        // Initialize user metadata service
+        injector.getInstance(UserMetadataService.class).start();
+
+        // Start the thrift services
+        MetacatThriftService thriftService = injector.getInstance(MetacatThriftService.class);
+        thriftService.start();
+
+        MetacatEventBus eventBus = new MetacatEventBus(eventExecutor);
+        // Register the elastic search event handlers only when an elastic search client is available
+        Client client = injector.getInstance(Client.class);
+        if (client != null) {
+            MetacatEventHandlers handlers = injector.getInstance(MetacatEventHandlers.class);
+            eventBus.register(handlers);
+        }
+    }
+
+    public void stop() throws Exception {
+        injector.getInstance(UserMetadataService.class).stop();
+
+        // Stop the thrift services
+        MetacatThriftService thriftService = injector.getInstance(MetacatThriftService.class);
+        thriftService.stop();
+
+        // Shutdown the executor used for event bus
+        if (eventExecutor != null) {
+            // Make the executor accept no new threads and finish all existing
+            // threads in the
queue + eventExecutor.shutdown(); + try { + eventExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + log.error("Error while shutting down executor service : ", e); + } + } + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatServletModule.java b/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatServletModule.java new file mode 100644 index 000000000..3c7532a69 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatServletModule.java @@ -0,0 +1,36 @@ +package com.netflix.metacat.main.init; + +import com.google.inject.servlet.ServletModule; +import com.netflix.metacat.common.api.MetacatV1; +import com.netflix.metacat.common.api.MetadataV1; +import com.netflix.metacat.common.api.PartitionV1; +import com.netflix.metacat.common.api.SearchMetacatV1; +import com.netflix.metacat.common.api.TagV1; +import com.netflix.metacat.common.server.CommonModule; +import com.netflix.metacat.converters.ConvertersModule; +import com.netflix.metacat.main.api.MetacatV1Resource; +import com.netflix.metacat.main.api.MetadataV1Resource; +import com.netflix.metacat.main.api.PartitionV1Resource; +import com.netflix.metacat.main.api.SearchMetacatV1Resource; +import com.netflix.metacat.main.api.TagV1Resource; +import com.netflix.metacat.main.manager.ManagerModule; +import com.netflix.metacat.main.services.ServicesModule; +import com.netflix.metacat.thrift.ThriftModule; + +public class MetacatServletModule extends ServletModule { + @Override + protected void configureServlets() { + install(new CommonModule()); + install(new ConvertersModule()); + install(new ThriftModule()); + install(new ManagerModule()); + install(new ServicesModule()); + + binder().bind(MetacatV1.class).to(MetacatV1Resource.class).asEagerSingleton(); + binder().bind(PartitionV1.class).to(PartitionV1Resource.class).asEagerSingleton(); + binder().bind(MetadataV1.class).to(MetadataV1Resource.class).asEagerSingleton(); + binder().bind(SearchMetacatV1.class).to(SearchMetacatV1Resource.class).asEagerSingleton(); + binder().bind(TagV1.class).to(TagV1Resource.class).asEagerSingleton(); + binder().bind(MetacatThriftService.class).asEagerSingleton(); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatThriftService.java b/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatThriftService.java new file mode 100644 index 000000000..8df6b3dab --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/init/MetacatThriftService.java @@ -0,0 +1,41 @@ +package com.netflix.metacat.main.init; + +import com.google.inject.Inject; +import com.netflix.metacat.main.connector.MetacatConnectorManager; +import com.netflix.metacat.thrift.CatalogThriftService; +import com.netflix.metacat.thrift.CatalogThriftServiceFactory; + +import java.util.List; +import java.util.stream.Collectors; + +public class MetacatThriftService { + private final MetacatConnectorManager connectorManager; + private final CatalogThriftServiceFactory thriftServiceFactory; + + @Inject + public MetacatThriftService(CatalogThriftServiceFactory c, MetacatConnectorManager m) { + this.thriftServiceFactory = c; + this.connectorManager = m; + } + + protected List getCatalogThriftServices() { + return connectorManager.getCatalogs() + .entrySet() + .stream() + .filter(entry -> entry.getValue().isThriftInterfaceRequested()) + .map(entry -> thriftServiceFactory.create(entry.getKey(), entry.getValue().getThriftPort())) + 
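                // one thrift service is created for each catalog that requested a thrift interface
+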
.collect(Collectors.toList()); + } + + public void start() throws Exception { + for (CatalogThriftService service : getCatalogThriftServices()) { + service.start(); + } + } + + public void stop() throws Exception { + for (CatalogThriftService service : getCatalogThriftServices()) { + service.stop(); + } + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/manager/ManagerModule.java b/metacat-main/src/main/java/com/netflix/metacat/main/manager/ManagerModule.java new file mode 100644 index 000000000..15c3026b7 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/manager/ManagerModule.java @@ -0,0 +1,93 @@ +package com.netflix.metacat.main.manager; + +import com.facebook.presto.connector.informationSchema.InformationSchemaModule; +import com.facebook.presto.metadata.CatalogManagerConfig; +import com.facebook.presto.metadata.InMemoryNodeManager; +import com.facebook.presto.metadata.InternalNodeManager; +import com.facebook.presto.metadata.Metadata; +import com.facebook.presto.metadata.RemoteSplitHandleResolver; +import com.facebook.presto.metadata.ViewDefinition; +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.ConnectorHandleResolver; +import com.facebook.presto.spi.ConnectorPageSourceProvider; +import com.facebook.presto.spi.ConnectorRecordSinkProvider; +import com.facebook.presto.spi.NodeManager; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.facebook.presto.split.PageSourceProvider; +import com.facebook.presto.type.TypeDeserializer; +import com.facebook.presto.type.TypeRegistry; +import com.google.common.collect.Maps; +import com.google.inject.AbstractModule; +import com.google.inject.Scopes; +import com.google.inject.multibindings.MapBinder; +import com.netflix.metacat.main.connector.MetacatConnectorManager; +import com.netflix.metacat.main.presto.connector.ConnectorManager; +import com.netflix.metacat.main.presto.index.IndexManager; +import com.netflix.metacat.main.presto.metadata.HandleResolver; +import com.netflix.metacat.main.presto.metadata.MetadataManager; +import com.netflix.metacat.main.presto.split.PageSourceManager; +import com.netflix.metacat.main.presto.split.SplitManager; +import io.airlift.configuration.ConfigurationFactory; + +import static com.google.inject.multibindings.MapBinder.newMapBinder; +import static com.google.inject.multibindings.Multibinder.newSetBinder; +import static io.airlift.configuration.ConfigurationModule.bindConfig; +import static io.airlift.json.JsonBinder.jsonBinder; +import static io.airlift.json.JsonCodecBinder.jsonCodecBinder; + +/** + * Created by amajumdar on 1/14/15. 
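+ * Guice module that wires the embedded Presto managers: metadata, types, splits, page sources, handles and connectors.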
+ */ +public class ManagerModule extends AbstractModule { + @Override + protected void configure() { + // Configuration factory + binder().bind(ConfigurationFactory.class).toInstance(new ConfigurationFactory(Maps.newHashMap())); + // Node manager + binder().bind(NodeManager.class).to(InMemoryNodeManager.class).in(Scopes.SINGLETON); + binder().bind(InternalNodeManager.class).to(InMemoryNodeManager.class).in(Scopes.SINGLETON); + + // split manager + binder().bind(SplitManager.class).in(Scopes.SINGLETON); + + // data stream provider + binder().bind(PageSourceManager.class).in(Scopes.SINGLETON); + binder().bind(PageSourceProvider.class).to(PageSourceManager.class).in(Scopes.SINGLETON); + newSetBinder(binder(), ConnectorPageSourceProvider.class); + + // record sink provider + newSetBinder(binder(), ConnectorRecordSinkProvider.class); + // metadata + binder().bind(MetadataManager.class).in(Scopes.SINGLETON); + binder().bind(Metadata.class).to(MetadataManager.class).in(Scopes.SINGLETON); + + // type + binder().bind(TypeRegistry.class).in(Scopes.SINGLETON); + binder().bind(TypeManager.class).to(TypeRegistry.class).in(Scopes.SINGLETON); + jsonBinder(binder()).addDeserializerBinding(Type.class).to(TypeDeserializer.class); + newSetBinder(binder(), Type.class); + + // index manager + binder().bind(IndexManager.class).in(Scopes.SINGLETON); + + // handle resolver + binder().bind(HandleResolver.class).in(Scopes.SINGLETON); + MapBinder connectorHandleResolverBinder = newMapBinder(binder(), String.class, + ConnectorHandleResolver.class); + connectorHandleResolverBinder.addBinding("remote").to(RemoteSplitHandleResolver.class).in(Scopes.SINGLETON); + + // connector + binder().bind(ConnectorManager.class).to(MetacatConnectorManager.class).in(Scopes.SINGLETON); + newMapBinder(binder(), String.class, ConnectorFactory.class); + + // information schema + binder().install(new InformationSchemaModule()); + + // json codec + jsonCodecBinder(binder()).bindJsonCodec(ViewDefinition.class); + + // + bindConfig(binder()).to(CatalogManagerConfig.class); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/manager/PluginManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/manager/PluginManager.java new file mode 100644 index 000000000..d57118dc6 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/manager/PluginManager.java @@ -0,0 +1,91 @@ +package com.netflix.metacat.main.manager; + +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.Plugin; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.type.TypeRegistry; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Maps; +import com.google.inject.Injector; +import com.google.inject.Singleton; +import com.netflix.metacat.main.presto.connector.ConnectorManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.util.List; +import java.util.Map; +import java.util.ServiceLoader; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 1/14/15. 
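+ * Discovers Presto plugins on the classpath via ServiceLoader and registers their types and connector factories.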
+ */ +@Singleton +public class PluginManager { + private static final Logger log = LoggerFactory.getLogger(PluginManager.class); + private final ConnectorManager connectorManager; + private final Injector injector; + private final Map<String, String> optionalConfig; + private final AtomicBoolean pluginsLoaded = new AtomicBoolean(); + private final AtomicBoolean pluginsLoading = new AtomicBoolean(); + private final TypeRegistry typeRegistry; + + @Inject + public PluginManager(Injector injector, + ConnectorManager connectorManager, + TypeRegistry typeRegistry) { + checkNotNull(injector, "injector is null"); + + this.injector = injector; + + optionalConfig = Maps.newConcurrentMap(); + + this.connectorManager = checkNotNull(connectorManager, "connectorManager is null"); + this.typeRegistry = checkNotNull(typeRegistry, "typeRegistry is null"); + } + + public boolean arePluginsLoaded() { + return pluginsLoaded.get(); + } + + public void installPlugin(Plugin plugin) { + injector.injectMembers(plugin); + + plugin.setOptionalConfig(optionalConfig); + + for (Type type : plugin.getServices(Type.class)) { + log.info("Registering type {}", type.getTypeSignature()); + typeRegistry.addType(type); + } + + for (ConnectorFactory connectorFactory : plugin.getServices(ConnectorFactory.class)) { + log.info("Registering connector {}", connectorFactory.getName()); + connectorManager.addConnectorFactory(connectorFactory); + } + } + + public void loadPlugins() + throws Exception { + if (!pluginsLoading.compareAndSet(false, true)) { + return; + } + + ServiceLoader<Plugin> serviceLoader = ServiceLoader.load(Plugin.class, this.getClass().getClassLoader()); + List<Plugin> plugins = ImmutableList.copyOf(serviceLoader); + + if (plugins.isEmpty()) { + log.warn("No service providers of type {}", Plugin.class.getName()); + } + + for (Plugin plugin : plugins) { + log.info("Installing {}", plugin.getClass().getName()); + installPlugin(plugin); + log.info("-- Finished loading plugin {} --", plugin.getClass().getName()); + } + + pluginsLoaded.set(true); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/connector/ConnectorManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/connector/ConnectorManager.java new file mode 100644 index 000000000..4804518eb --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/connector/ConnectorManager.java @@ -0,0 +1,290 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.netflix.metacat.main.presto.connector; + +import com.facebook.presto.connector.informationSchema.InformationSchemaMetadata; +import com.facebook.presto.connector.informationSchema.InformationSchemaPageSourceProvider; +import com.facebook.presto.connector.informationSchema.InformationSchemaSplitManager; +import com.facebook.presto.connector.system.SystemConnector; +import com.facebook.presto.security.AccessControlManager; +import com.facebook.presto.spi.Connector; +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.ConnectorHandleResolver; +import com.facebook.presto.spi.ConnectorIndexResolver; +import com.facebook.presto.spi.ConnectorMetadata; +import com.facebook.presto.spi.ConnectorPageSinkProvider; +import com.facebook.presto.spi.ConnectorPageSourceProvider; +import com.facebook.presto.spi.ConnectorRecordSetProvider; +import com.facebook.presto.spi.ConnectorRecordSinkProvider; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.NodeManager; +import com.facebook.presto.spi.SystemTable; +import com.facebook.presto.spi.classloader.ThreadContextClassLoader; +import com.facebook.presto.spi.security.ConnectorAccessControl; +import com.facebook.presto.spi.session.PropertyMetadata; +import com.facebook.presto.split.RecordPageSinkProvider; +import com.facebook.presto.split.RecordPageSourceProvider; +import com.netflix.metacat.main.presto.index.IndexManager; +import com.netflix.metacat.main.presto.metadata.HandleResolver; +import com.netflix.metacat.main.presto.metadata.MetadataManager; +import com.netflix.metacat.main.presto.split.PageSinkManager; +import com.netflix.metacat.main.presto.split.PageSourceManager; +import com.netflix.metacat.main.presto.split.SplitManager; +import io.airlift.log.Logger; + +import javax.annotation.PreDestroy; +import javax.inject.Inject; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +public class ConnectorManager +{ + public static final String INFORMATION_SCHEMA_CONNECTOR_PREFIX = "$info_schema@"; + public static final String SYSTEM_TABLES_CONNECTOR_PREFIX = "$system@"; + + private static final Logger log = Logger.get(ConnectorManager.class); + + private final MetadataManager metadataManager; + private final AccessControlManager accessControlManager; + private final SplitManager splitManager; + private final PageSourceManager pageSourceManager; + private final IndexManager indexManager; + + private final PageSinkManager pageSinkManager; + private final HandleResolver handleResolver; + private final NodeManager nodeManager; + + private final ConcurrentMap<String, ConnectorFactory> connectorFactories = new ConcurrentHashMap<>(); + + private final ConcurrentMap<String, Connector> connectors = new ConcurrentHashMap<>(); + + private final AtomicBoolean stopped = new AtomicBoolean(); + + @Inject + public ConnectorManager(MetadataManager metadataManager, + AccessControlManager accessControlManager, + SplitManager splitManager, + PageSourceManager pageSourceManager, + IndexManager indexManager, + PageSinkManager pageSinkManager, + HandleResolver handleResolver, + Map<String, ConnectorFactory> connectorFactories, + NodeManager nodeManager) + { + this.metadataManager = metadataManager; +
this.accessControlManager = accessControlManager; + this.splitManager = splitManager; + this.pageSourceManager = pageSourceManager; + this.indexManager = indexManager; + this.pageSinkManager = pageSinkManager; + this.handleResolver = handleResolver; + this.nodeManager = nodeManager; + this.connectorFactories.putAll(connectorFactories); + } + + @PreDestroy + public void stop() + { + if (stopped.getAndSet(true)) { + return; + } + + for (Map.Entry<String, Connector> entry : connectors.entrySet()) { + Connector connector = entry.getValue(); + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(connector.getClass().getClassLoader())) { + connector.shutdown(); + } + catch (Throwable t) { + log.error(t, "Error shutting down connector: %s", entry.getKey()); + } + } + } + + public void addConnectorFactory(ConnectorFactory connectorFactory) + { + checkState(!stopped.get(), "ConnectorManager is stopped"); + ConnectorFactory existingConnectorFactory = connectorFactories.putIfAbsent(connectorFactory.getName(), connectorFactory); + checkArgument(existingConnectorFactory == null, "Connector %s is already registered", connectorFactory.getName()); + } + + public synchronized void createConnection(String catalogName, String connectorName, Map<String, String> properties) + { + checkState(!stopped.get(), "ConnectorManager is stopped"); + checkNotNull(catalogName, "catalogName is null"); + checkNotNull(connectorName, "connectorName is null"); + checkNotNull(properties, "properties is null"); + + ConnectorFactory connectorFactory = connectorFactories.get(connectorName); + checkArgument(connectorFactory != null, "No factory for connector %s", connectorName); + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(connectorFactory.getClass().getClassLoader())) { + createConnection(catalogName, connectorFactory, properties); + } + } + + public synchronized void createConnection(String catalogName, ConnectorFactory connectorFactory, Map<String, String> properties) + { + checkState(!stopped.get(), "ConnectorManager is stopped"); + checkNotNull(catalogName, "catalogName is null"); + checkNotNull(properties, "properties is null"); + checkNotNull(connectorFactory, "connectorFactory is null"); + + String connectorId = getConnectorId(catalogName); + checkState(!connectors.containsKey(connectorId), "A connector %s already exists", connectorId); + + Connector connector = connectorFactory.create(connectorId, properties); + + addConnector(catalogName, connectorId, connector); + } + + public synchronized void createConnection(String catalogName, Connector connector) + { + checkState(!stopped.get(), "ConnectorManager is stopped"); + checkNotNull(catalogName, "catalogName is null"); + checkNotNull(connector, "connector is null"); + + addConnector(catalogName, getConnectorId(catalogName), connector); + } + + private synchronized void addConnector(String catalogName, String connectorId, Connector connector) + { + checkState(!stopped.get(), "ConnectorManager is stopped"); + checkState(!connectors.containsKey(connectorId), "A connector %s already exists", connectorId); + connectors.put(connectorId, connector); + + ConnectorMetadata connectorMetadata = connector.getMetadata(); + checkState(connectorMetadata != null, "Connector %s can not provide metadata", connectorId); + + ConnectorSplitManager connectorSplitManager = connector.getSplitManager(); + checkState(connectorSplitManager != null, "Connector %s does not have a split manager", connectorId); + + Set<SystemTable> systemTables = connector.getSystemTables(); + checkNotNull(systemTables, "Connector %s returned
a null system tables set"); + + ConnectorPageSourceProvider connectorPageSourceProvider = null; + try { + connectorPageSourceProvider = connector.getPageSourceProvider(); + checkNotNull(connectorPageSourceProvider, "Connector %s returned a null page source provider", connectorId); + } + catch (UnsupportedOperationException ignored) { + } + + if (connectorPageSourceProvider == null) { + ConnectorRecordSetProvider connectorRecordSetProvider = null; + try { + connectorRecordSetProvider = connector.getRecordSetProvider(); + checkNotNull(connectorRecordSetProvider, "Connector %s returned a null record set provider", connectorId); + } + catch (UnsupportedOperationException ignored) { + } + checkState(connectorRecordSetProvider != null, "Connector %s has neither a PageSource or RecordSet provider", connectorId); + connectorPageSourceProvider = new RecordPageSourceProvider(connectorRecordSetProvider); + } + + ConnectorHandleResolver connectorHandleResolver = connector.getHandleResolver(); + checkNotNull(connectorHandleResolver, "Connector %s does not have a handle resolver", connectorId); + + ConnectorPageSinkProvider connectorPageSinkProvider = null; + try { + connectorPageSinkProvider = connector.getPageSinkProvider(); + checkNotNull(connectorPageSinkProvider, "Connector %s returned a null page sink provider", connectorId); + } + catch (UnsupportedOperationException ignored) { + } + + if (connectorPageSinkProvider == null) { + ConnectorRecordSinkProvider connectorRecordSinkProvider = null; + try { + connectorRecordSinkProvider = connector.getRecordSinkProvider(); + checkNotNull(connectorRecordSinkProvider, "Connector %s returned a null record sink provider", connectorId); + connectorPageSinkProvider = new RecordPageSinkProvider(connectorRecordSinkProvider); + } + catch (UnsupportedOperationException ignored) { + } + } + + ConnectorIndexResolver indexResolver = null; + try { + indexResolver = connector.getIndexResolver(); + checkNotNull(indexResolver, "Connector %s returned a null index resolver", connectorId); + } + catch (UnsupportedOperationException ignored) { + } + + List> tableProperties = connector.getTableProperties(); + checkNotNull(tableProperties, "Connector %s returned null table properties", connectorId); + + ConnectorAccessControl accessControl = null; + try { + accessControl = connector.getAccessControl(); + } + catch (UnsupportedOperationException ignored) { + } + + // IMPORTANT: all the instances need to be fetched from the connector *before* we add them to the corresponding managers. 
+ // Otherwise, a broken connector would leave the managers in an inconsistent state with respect to each other + + metadataManager.addConnectorMetadata(connectorId, catalogName, connectorMetadata); + + metadataManager.addInformationSchemaMetadata(makeInformationSchemaConnectorId(connectorId), catalogName, new InformationSchemaMetadata(catalogName)); + splitManager.addConnectorSplitManager(makeInformationSchemaConnectorId(connectorId), new InformationSchemaSplitManager(nodeManager)); + pageSourceManager.addConnectorPageSourceProvider(makeInformationSchemaConnectorId(connectorId), new InformationSchemaPageSourceProvider(metadataManager)); + + Connector systemConnector = new SystemConnector(nodeManager, systemTables); + metadataManager.addSystemTablesMetadata(makeSystemTablesConnectorId(connectorId), catalogName, systemConnector.getMetadata()); + splitManager.addConnectorSplitManager(makeSystemTablesConnectorId(connectorId), systemConnector.getSplitManager()); + pageSourceManager.addConnectorPageSourceProvider(makeSystemTablesConnectorId(connectorId), new RecordPageSourceProvider(systemConnector.getRecordSetProvider())); + + splitManager.addConnectorSplitManager(connectorId, connectorSplitManager); + handleResolver.addHandleResolver(connectorId, connectorHandleResolver); + pageSourceManager.addConnectorPageSourceProvider(connectorId, connectorPageSourceProvider); + metadataManager.getSessionPropertyManager().addConnectorSessionProperties(catalogName, connector.getSessionProperties()); + metadataManager.getTablePropertyManager().addTableProperties(catalogName, tableProperties); + + if (connectorPageSinkProvider != null) { + pageSinkManager.addConnectorPageSinkProvider(connectorId, connectorPageSinkProvider); + } + + if (indexResolver != null) { + indexManager.addIndexResolver(connectorId, indexResolver); + } + + if (accessControl != null) { + accessControlManager.addCatalogAccessControl(catalogName, accessControl); + } + } + + private static String makeInformationSchemaConnectorId(String connectorId) + { + return INFORMATION_SCHEMA_CONNECTOR_PREFIX + connectorId; + } + + private static String makeSystemTablesConnectorId(String connectorId) + { + return SYSTEM_TABLES_CONNECTOR_PREFIX + connectorId; + } + + private static String getConnectorId(String catalogName) + { + // for now connectorId == catalogName + return catalogName; + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/index/IndexManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/index/IndexManager.java new file mode 100644 index 000000000..e55a6c279 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/index/IndexManager.java @@ -0,0 +1,97 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.metacat.main.presto.index; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.IndexHandle; +import com.facebook.presto.metadata.ResolvedIndex; +import com.facebook.presto.metadata.TableHandle; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ConnectorIndex; +import com.facebook.presto.spi.ConnectorIndexResolver; +import com.facebook.presto.spi.ConnectorResolvedIndex; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.TupleDomain; + +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; + +public class IndexManager +{ + private final ConcurrentMap<String, ConnectorIndexResolver> resolvers = new ConcurrentHashMap<>(); + + public void addIndexResolver(String connectorId, ConnectorIndexResolver resolver) + { + checkState(resolvers.putIfAbsent(connectorId, resolver) == null, "IndexResolver for connector '%s' is already registered", connectorId); + } + + public Optional<ResolvedIndex> resolveIndex( + Session session, + TableHandle tableHandle, + Set<ColumnHandle> indexableColumns, + Set<ColumnHandle> outputColumns, + TupleDomain<ColumnHandle> tupleDomain) + { + ConnectorIndexResolver resolver = resolvers.get(tableHandle.getConnectorId()); + if (resolver == null) { + return Optional.empty(); + } + + ConnectorSession connectorSession = session.toConnectorSession(tableHandle.getConnectorId()); + ConnectorResolvedIndex resolved = resolver.resolveIndex(connectorSession, tableHandle.getConnectorHandle(), indexableColumns, outputColumns, tupleDomain); + + if (resolved == null) { + return Optional.empty(); + } + + return Optional.of(new ResolvedIndex(tableHandle.getConnectorId(), resolved)); + } + + public ConnectorIndex getIndex(Session session, IndexHandle indexHandle, List<ColumnHandle> lookupSchema, List<ColumnHandle> outputSchema) + { + // assumes connectorId and catalog are the same + ConnectorSession connectorSession = session.toConnectorSession(indexHandle.getConnectorId()); + return getResolver(indexHandle) + .getIndex(connectorSession, indexHandle.getConnectorHandle(), lookupSchema, outputSchema); + } + + private ConnectorIndexResolver getResolver(IndexHandle handle) + { + ConnectorIndexResolver result = resolvers.get(handle.getConnectorId()); + + checkArgument(result != null, "No index resolver for connector '%s'", handle.getConnectorId()); + + return result; + } + + /** + * NETFLIX addition + */ + public synchronized void flush(String catalogName){ + resolvers.remove(catalogName); + } + + /** + * NETFLIX addition + */ + public synchronized void flushAll(){ + resolvers.clear(); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/CatalogManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/CatalogManager.java new file mode 100644 index 000000000..c99ecb654 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/CatalogManager.java @@ -0,0 +1,114 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.metacat.main.presto.metadata; + +import com.facebook.presto.metadata.CatalogManagerConfig; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.io.Files; +import com.netflix.metacat.main.presto.connector.ConnectorManager; +import io.airlift.log.Logger; + +import javax.inject.Inject; +import java.io.File; +import java.io.FileInputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.collect.Maps.fromProperties; + +public class CatalogManager +{ + private static final Logger log = Logger.get(CatalogManager.class); + private final ConnectorManager connectorManager; + private final File catalogConfigurationDir; + private final AtomicBoolean catalogsLoading = new AtomicBoolean(); + private final AtomicBoolean catalogsLoaded = new AtomicBoolean(); + + @Inject + public CatalogManager(ConnectorManager connectorManager, CatalogManagerConfig config) + { + this(connectorManager, config.getCatalogConfigurationDir()); + } + + public CatalogManager(ConnectorManager connectorManager, File catalogConfigurationDir) + { + this.connectorManager = connectorManager; + this.catalogConfigurationDir = catalogConfigurationDir; + } + + public boolean areCatalogsLoaded() + { + return catalogsLoaded.get(); + } + + public void loadCatalogs() + throws Exception + { + if (!catalogsLoading.compareAndSet(false, true)) { + return; + } + + for (File file : listFiles(catalogConfigurationDir)) { + if (file.isFile() && file.getName().endsWith(".properties")) { + loadCatalog(file); + } + } + + catalogsLoaded.set(true); + } + + private void loadCatalog(File file) + throws Exception + { + log.info("-- Loading catalog %s --", file); + Map<String, String> properties = new HashMap<>(loadProperties(file)); + + String connectorName = properties.remove("connector.name"); + checkState(connectorName != null, "Catalog configuration %s does not contain connector.name", file.getAbsoluteFile()); + + String catalogName = Files.getNameWithoutExtension(file.getName()); + + connectorManager.createConnection(catalogName, connectorName, ImmutableMap.copyOf(properties)); + log.info("-- Added catalog %s using connector %s --", catalogName, connectorName); + } + + private static List<File> listFiles(File installedPluginsDir) + { + if (installedPluginsDir != null && installedPluginsDir.isDirectory()) { + File[] files = installedPluginsDir.listFiles(); + if (files != null) { + return ImmutableList.copyOf(files); + } + } + return ImmutableList.of(); + } + + private static Map<String, String> loadProperties(File file) + throws Exception + { + checkNotNull(file, "file is null"); + + Properties properties = new Properties(); + try (FileInputStream in = new FileInputStream(file)) { + properties.load(in); + } + return fromProperties(properties); + } +} diff --git
a/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/HandleResolver.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/HandleResolver.java new file mode 100644 index 000000000..fae995567 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/HandleResolver.java @@ -0,0 +1,196 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.metacat.main.presto.metadata; + +import com.facebook.presto.metadata.LegacyTableLayoutHandle; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ConnectorHandleResolver; +import com.facebook.presto.spi.ConnectorIndexHandle; +import com.facebook.presto.spi.ConnectorInsertTableHandle; +import com.facebook.presto.spi.ConnectorOutputTableHandle; +import com.facebook.presto.spi.ConnectorSplit; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableLayoutHandle; + +import javax.inject.Inject; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; + +public class HandleResolver +{ + private final ConcurrentMap handleIdResolvers = new ConcurrentHashMap<>(); + + public HandleResolver() + { + } + + @Inject + public HandleResolver(Map handleIdResolvers) + { + this.handleIdResolvers.putAll(handleIdResolvers); + } + + public void addHandleResolver(String id, ConnectorHandleResolver connectorHandleResolver) + { + ConnectorHandleResolver existingResolver = handleIdResolvers.putIfAbsent(id, connectorHandleResolver); + checkState(existingResolver == null, "Id %s is already assigned to resolver %s", id, existingResolver); + } + + public String getId(ConnectorTableHandle tableHandle) + { + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(tableHandle)) { + return entry.getKey(); + } + } + throw new IllegalArgumentException("No connector for table handle: " + tableHandle); + } + + public String getId(ConnectorTableLayoutHandle handle) + { + if (handle instanceof LegacyTableLayoutHandle) { + LegacyTableLayoutHandle legacyHandle = (LegacyTableLayoutHandle) handle; + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(legacyHandle.getTable())) { + return entry.getKey(); + } + } + } + else { + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(handle)) { + return entry.getKey(); + } + } + } + throw new IllegalArgumentException("No connector for table handle: " + handle); + } + + public String getId(ColumnHandle columnHandle) + { + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(columnHandle)) { + return entry.getKey(); + } + } + throw new IllegalArgumentException("No connector for column handle: " + columnHandle); + } + + public String getId(ConnectorSplit split) + 
{ + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(split)) { + return entry.getKey(); + } + } + throw new IllegalArgumentException("No connector for split: " + split); + } + + public String getId(ConnectorIndexHandle indexHandle) + { + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(indexHandle)) { + return entry.getKey(); + } + } + throw new IllegalArgumentException("No connector for index handle: " + indexHandle); + } + + public String getId(ConnectorOutputTableHandle outputHandle) + { + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(outputHandle)) { + return entry.getKey(); + } + } + throw new IllegalArgumentException("No connector for output table handle: " + outputHandle); + } + + public String getId(ConnectorInsertTableHandle insertHandle) + { + for (Entry entry : handleIdResolvers.entrySet()) { + if (entry.getValue().canHandle(insertHandle)) { + return entry.getKey(); + } + } + throw new IllegalArgumentException("No connector for insert table handle: " + insertHandle); + } + + public Class getTableHandleClass(String id) + { + return resolverFor(id).getTableHandleClass(); + } + + public Class getTableLayoutHandleClass(String id) + { + try { + return resolverFor(id).getTableLayoutHandleClass(); + } + catch (UnsupportedOperationException e) { + return LegacyTableLayoutHandle.class; + } + } + + public Class getColumnHandleClass(String id) + { + return resolverFor(id).getColumnHandleClass(); + } + + public Class getSplitClass(String id) + { + return resolverFor(id).getSplitClass(); + } + + public Class getIndexHandleClass(String id) + { + return resolverFor(id).getIndexHandleClass(); + } + + public Class getOutputTableHandleClass(String id) + { + return resolverFor(id).getOutputTableHandleClass(); + } + + public Class getInsertTableHandleClass(String id) + { + return resolverFor(id).getInsertTableHandleClass(); + } + + public ConnectorHandleResolver resolverFor(String id) + { + ConnectorHandleResolver resolver = handleIdResolvers.get(id); + checkArgument(resolver != null, "No handle resolver for %s", id); + return resolver; + } + + + /** + * NETFLIX addition + */ + public synchronized void flush(String catalogName){ + handleIdResolvers.remove(catalogName); + } + + /** + * NETFLIX addition + */ + public synchronized void flushAll(){ + handleIdResolvers.clear(); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/MetadataManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/MetadataManager.java new file mode 100644 index 000000000..a63681622 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/metadata/MetadataManager.java @@ -0,0 +1,932 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.metacat.main.presto.metadata; + +import com.facebook.presto.Session; +import com.facebook.presto.connector.informationSchema.InformationSchemaMetadata; +import com.facebook.presto.exception.CatalogNotFoundException; +import com.facebook.presto.metadata.FunctionInfo; +import com.facebook.presto.metadata.FunctionRegistry; +import com.facebook.presto.metadata.InsertTableHandle; +import com.facebook.presto.metadata.LegacyTableLayoutHandle; +import com.facebook.presto.metadata.Metadata; +import com.facebook.presto.metadata.OperatorNotFoundException; +import com.facebook.presto.metadata.OperatorType; +import com.facebook.presto.metadata.OutputTableHandle; +import com.facebook.presto.metadata.ParametricFunction; +import com.facebook.presto.metadata.QualifiedTableName; +import com.facebook.presto.metadata.QualifiedTablePrefix; +import com.facebook.presto.metadata.SessionPropertyManager; +import com.facebook.presto.metadata.Signature; +import com.facebook.presto.metadata.TableHandle; +import com.facebook.presto.metadata.TableLayout; +import com.facebook.presto.metadata.TableLayoutHandle; +import com.facebook.presto.metadata.TableLayoutResult; +import com.facebook.presto.metadata.TableMetadata; +import com.facebook.presto.metadata.TablePropertyManager; +import com.facebook.presto.metadata.ViewDefinition; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorDetailMetadata; +import com.facebook.presto.spi.ConnectorInsertTableHandle; +import com.facebook.presto.spi.ConnectorMetadata; +import com.facebook.presto.spi.ConnectorOutputTableHandle; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionResult; +import com.facebook.presto.spi.ConnectorSchemaMetadata; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableLayout; +import com.facebook.presto.spi.ConnectorTableLayoutResult; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.facebook.presto.spi.Constraint; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.SchemaTablePrefix; +import com.facebook.presto.spi.TupleDomain; +import com.facebook.presto.spi.block.BlockEncodingSerde; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.facebook.presto.spi.type.TypeSignature; +import com.facebook.presto.split.SplitManager; +import com.facebook.presto.sql.analyzer.FeaturesConfig; +import com.facebook.presto.sql.tree.QualifiedName; +import com.facebook.presto.type.TypeDeserializer; +import com.facebook.presto.type.TypeRegistry; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import io.airlift.json.JsonCodec; +import io.airlift.json.JsonCodecFactory; +import io.airlift.json.ObjectMapperProvider; +import io.airlift.slice.Slice; + +import javax.inject.Inject; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; +import java.util.Set; +import 
java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static com.facebook.presto.metadata.MetadataUtil.checkCatalogName; +import static com.facebook.presto.metadata.QualifiedTableName.convertFromSchemaTableName; +import static com.facebook.presto.metadata.TableLayout.fromConnectorLayout; +import static com.facebook.presto.metadata.ViewDefinition.ViewColumn; +import static com.facebook.presto.spi.StandardErrorCode.*; +import static com.facebook.presto.spi.StandardErrorCode.INVALID_VIEW; +import static com.facebook.presto.spi.StandardErrorCode.NOT_FOUND; +import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED; +import static com.facebook.presto.spi.StandardErrorCode.SYNTAX_ERROR; +import static com.facebook.presto.util.ImmutableCollectors.toImmutableList; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.collect.Iterables.transform; +import static java.lang.String.format; +import static java.util.Locale.ENGLISH; + +public class MetadataManager + implements Metadata +{ + private static final String INFORMATION_SCHEMA_NAME = "information_schema"; + + private final ConcurrentMap informationSchemasByCatalog = new ConcurrentHashMap<>(); + private final ConcurrentMap systemTablesByCatalog = new ConcurrentHashMap<>(); + private final ConcurrentMap connectorsByCatalog = new ConcurrentHashMap<>(); + private final ConcurrentMap connectorsById = new ConcurrentHashMap<>(); + private final TypeManager typeManager; + private final JsonCodec viewCodec; + private final SplitManager splitManager; + private final SessionPropertyManager sessionPropertyManager; + private final TablePropertyManager tablePropertyManager; + + public MetadataManager(FeaturesConfig featuresConfig, + TypeManager typeManager, + SplitManager splitManager, + SessionPropertyManager sessionPropertyManager, + TablePropertyManager tablePropertyManager) + { + this(featuresConfig, typeManager, createTestingViewCodec(), splitManager, + sessionPropertyManager, tablePropertyManager); + } + + @Inject + public MetadataManager(FeaturesConfig featuresConfig, + TypeManager typeManager, + JsonCodec viewCodec, + SplitManager splitManager, + SessionPropertyManager sessionPropertyManager, + TablePropertyManager tablePropertyManager) + { + this.typeManager = checkNotNull(typeManager, "types is null"); + this.viewCodec = checkNotNull(viewCodec, "viewCodec is null"); + this.splitManager = checkNotNull(splitManager, "splitManager is null"); + this.sessionPropertyManager = checkNotNull(sessionPropertyManager, "sessionPropertyManager is null"); + this.tablePropertyManager = checkNotNull(tablePropertyManager, "tablePropertyManager is null"); + } + + public static MetadataManager createTestMetadataManager() + { + FeaturesConfig featuresConfig = new FeaturesConfig(); + TypeManager typeManager = new TypeRegistry(); + SessionPropertyManager sessionPropertyManager = new SessionPropertyManager(); + SplitManager splitManager = new SplitManager(); + return new MetadataManager(featuresConfig, typeManager, splitManager, sessionPropertyManager, new TablePropertyManager()); + } + + public synchronized void addConnectorMetadata(String connectorId, String catalogName, ConnectorMetadata connectorMetadata) + { + checkMetadataArguments(connectorId, catalogName, connectorMetadata); + 
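// A catalog can be registered only once; the entry is then indexed by both connector id and catalog name so either key resolves to the same metadata. +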
checkArgument(!connectorsByCatalog.containsKey(catalogName), "Catalog '%s' is already registered", catalogName); + + ConnectorMetadataEntry connectorMetadataEntry = new ConnectorMetadataEntry(connectorId, catalogName, connectorMetadata); + connectorsById.put(connectorId, connectorMetadataEntry); + connectorsByCatalog.put(catalogName, connectorMetadataEntry); + } + + public synchronized void addInformationSchemaMetadata(String connectorId, String catalogName, InformationSchemaMetadata metadata) + { + checkMetadataArguments(connectorId, catalogName, metadata); + checkArgument(!informationSchemasByCatalog.containsKey(catalogName), "Information schema for catalog '%s' is already registered", catalogName); + + ConnectorMetadataEntry connectorMetadataEntry = new ConnectorMetadataEntry(connectorId, catalogName, metadata); + connectorsById.put(connectorId, connectorMetadataEntry); + informationSchemasByCatalog.put(catalogName, connectorMetadataEntry); + } + + public synchronized void addSystemTablesMetadata(String connectorId, String catalogName, ConnectorMetadata metadata) + { + checkMetadataArguments(connectorId, catalogName, metadata); + checkArgument(!systemTablesByCatalog.containsKey(catalogName), "System tables for catalog '%s' are already registered", catalogName); + + ConnectorMetadataEntry connectorMetadataEntry = new ConnectorMetadataEntry(connectorId, catalogName, metadata); + connectorsById.put(connectorId, connectorMetadataEntry); + systemTablesByCatalog.put(catalogName, connectorMetadataEntry); + } + + private void checkMetadataArguments(String connectorId, String catalogName, ConnectorMetadata metadata) + { + checkNotNull(connectorId, "connectorId is null"); + checkNotNull(catalogName, "catalogName is null"); + checkNotNull(metadata, "metadata is null"); + checkArgument(!connectorsById.containsKey(connectorId), "Connector '%s' is already registered", connectorId); + } + + @Override + public final void verifyComparableOrderableContract() + { + } + + @Override + public Type getType(TypeSignature signature) + { + return typeManager.getType(signature); + } + + @Override + public FunctionInfo resolveFunction(QualifiedName name, List parameterTypes, boolean approximate) + { + throw new UnsupportedOperationException(); + } + + @Override + public FunctionInfo getExactFunction(Signature handle) + { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isAggregationFunction(QualifiedName name) + { + throw new UnsupportedOperationException(); + } + + @Override + public List listFunctions() + { + throw new UnsupportedOperationException(); + } + + @Override + public void addFunctions(List functionInfos) + { + throw new UnsupportedOperationException(); + } + + @Override + public FunctionInfo resolveOperator(OperatorType operatorType, List argumentTypes) + throws OperatorNotFoundException + { + throw new UnsupportedOperationException(); + } + + @Override + public List listSchemaNames(Session session, String catalogName) + { + checkCatalogName(catalogName); + ImmutableSet.Builder schemaNames = ImmutableSet.builder(); + for (ConnectorMetadataEntry entry : allConnectorsFor(catalogName)) { + schemaNames.addAll(entry.getMetadata().listSchemaNames(session.toConnectorSession(entry.getCatalog()))); + } + return ImmutableList.copyOf(schemaNames.build()); + } + + @Override + public Optional getTableHandle(Session session, QualifiedTableName table) + { + checkNotNull(table, "table is null"); + + ConnectorMetadataEntry entry = getConnectorFor(table); + if (entry != null) { + 
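// Resolve the handle through the catalog's own connector; a missing table falls through to Optional.empty(). +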
ConnectorMetadata metadata = entry.getMetadata(); + + ConnectorTableHandle tableHandle = metadata.getTableHandle(session.toConnectorSession(entry.getCatalog()), table.asSchemaTableName()); + + if (tableHandle != null) { + return Optional.of(new TableHandle(entry.getConnectorId(), tableHandle)); + } + } + return Optional.empty(); + } + + @Override + public List getLayouts(Session session, TableHandle table, Constraint constraint, Optional> desiredColumns) + { + if (constraint.getSummary().isNone()) { + return ImmutableList.of(); + } + + TupleDomain summary = constraint.getSummary(); + String connectorId = table.getConnectorId(); + ConnectorTableHandle connectorTable = table.getConnectorHandle(); + Predicate> predicate = constraint.predicate(); + + List layouts; + ConnectorMetadataEntry entry = getConnectorMetadata(connectorId); + ConnectorSession connectorSession = session.toConnectorSession(entry.getCatalog()); + try { + layouts = entry.getMetadata().getTableLayouts(connectorSession, connectorTable, new Constraint<>(summary, predicate::test), desiredColumns); + } + catch (UnsupportedOperationException e) { + ConnectorSplitManager connectorSplitManager = splitManager.getConnectorSplitManager(connectorId); + ConnectorPartitionResult result = connectorSplitManager.getPartitions(connectorSession, connectorTable, summary); + + List partitions = result.getPartitions().stream() + .filter(partition -> predicate.test(partition.getTupleDomain().extractFixedValues())) + .collect(toImmutableList()); + + List> partitionDomains = partitions.stream() + .map(ConnectorPartition::getTupleDomain) + .collect(toImmutableList()); + + TupleDomain effectivePredicate = TupleDomain.none(); + if (!partitionDomains.isEmpty()) { + effectivePredicate = TupleDomain.columnWiseUnion(partitionDomains); + } + + ConnectorTableLayout layout = new ConnectorTableLayout(new LegacyTableLayoutHandle(connectorTable, partitions), Optional.empty(), effectivePredicate, Optional.empty(), Optional.of(partitionDomains), ImmutableList.of()); + layouts = ImmutableList.of(new ConnectorTableLayoutResult(layout, result.getUndeterminedTupleDomain())); + } + + return layouts.stream() + .map(layout -> new TableLayoutResult(fromConnectorLayout(connectorId, layout.getTableLayout()), layout.getUnenforcedConstraint())) + .collect(toImmutableList()); + } + + @Override + public TableLayout getLayout(Session session, TableLayoutHandle handle) + { + if (handle.getConnectorHandle() instanceof LegacyTableLayoutHandle) { + LegacyTableLayoutHandle legacyHandle = (LegacyTableLayoutHandle) handle.getConnectorHandle(); + List> partitionDomains = legacyHandle.getPartitions().stream() + .map(ConnectorPartition::getTupleDomain) + .collect(toImmutableList()); + + TupleDomain predicate = TupleDomain.none(); + if (!partitionDomains.isEmpty()) { + predicate = TupleDomain.columnWiseUnion(partitionDomains); + } + return new TableLayout(handle, new ConnectorTableLayout(legacyHandle, Optional.empty(), predicate, Optional.empty(), Optional.of(partitionDomains), ImmutableList.of())); + } + + String connectorId = handle.getConnectorId(); + ConnectorMetadataEntry entry = getConnectorMetadata(connectorId); + return fromConnectorLayout(connectorId, entry.getMetadata() + .getTableLayout(session.toConnectorSession(entry.getCatalog()), handle.getConnectorHandle())); + } + + @Override + public TableMetadata getTableMetadata(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + ConnectorTableMetadata tableMetadata = 
entry.getMetadata().getTableMetadata( + session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle()); + + return new TableMetadata(tableHandle.getConnectorId(), tableMetadata); + } + + @Override + public Map getColumnHandles(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + return entry.getMetadata().getColumnHandles(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle()); + } + + @Override + public ColumnMetadata getColumnMetadata(Session session, TableHandle tableHandle, ColumnHandle columnHandle) + { + checkNotNull(tableHandle, "tableHandle is null"); + checkNotNull(columnHandle, "columnHandle is null"); + + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + return entry.getMetadata().getColumnMetadata(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle(), columnHandle); + } + + @Override + public List listTables(Session session, QualifiedTablePrefix prefix) + { + checkNotNull(prefix, "prefix is null"); + + String schemaNameOrNull = prefix.getSchemaName().orElse(null); + Set tables = new LinkedHashSet<>(); + for (ConnectorMetadataEntry entry : allConnectorsFor(prefix.getCatalogName())) { + ConnectorSession connectorSession = session.toConnectorSession(entry.getCatalog()); + for (QualifiedTableName tableName : transform(entry.getMetadata().listTables(connectorSession, schemaNameOrNull), convertFromSchemaTableName(prefix.getCatalogName()))) { + tables.add(tableName); + } + } + return ImmutableList.copyOf(tables); + } + + @Override + public Optional getSampleWeightColumnHandle(Session session, TableHandle tableHandle) + { + checkNotNull(tableHandle, "tableHandle is null"); + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + ColumnHandle handle = entry.getMetadata().getSampleWeightColumnHandle(session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle()); + + return Optional.ofNullable(handle); + } + + @Override + public boolean canCreateSampledTables(Session session, String catalogName) + { + ConnectorMetadataEntry connectorMetadata = connectorsByCatalog.get(catalogName); + checkArgument(connectorMetadata != null, "Catalog %s does not exist", catalogName); + return connectorMetadata.getMetadata().canCreateSampledTables(session.toConnectorSession(connectorMetadata.getCatalog())); + } + + @Override + public Map> listTableColumns(Session session, QualifiedTablePrefix prefix) + { + checkNotNull(prefix, "prefix is null"); + SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix(); + + Map> tableColumns = new HashMap<>(); + for (ConnectorMetadataEntry connectorMetadata : allConnectorsFor(prefix.getCatalogName())) { + ConnectorMetadata metadata = connectorMetadata.getMetadata(); + + ConnectorSession connectorSession = session.toConnectorSession(connectorMetadata.getCatalog()); + for (Entry> entry : metadata.listTableColumns(connectorSession, tablePrefix).entrySet()) { + QualifiedTableName tableName = new QualifiedTableName( + prefix.getCatalogName(), + entry.getKey().getSchemaName(), + entry.getKey().getTableName()); + tableColumns.put(tableName, entry.getValue()); + } + + // if table and view names overlap, the view wins + for (Entry entry : metadata.getViews(connectorSession, tablePrefix).entrySet()) { + QualifiedTableName tableName = new QualifiedTableName( + prefix.getCatalogName(), + entry.getKey().getSchemaName(), + entry.getKey().getTableName()); + + ImmutableList.Builder columns = 
ImmutableList.builder(); + for (ViewColumn column : deserializeView(entry.getValue()).getColumns()) { + columns.add(new ColumnMetadata(column.getName(), column.getType(), false)); + } + + tableColumns.put(tableName, columns.build()); + } + } + return ImmutableMap.copyOf(tableColumns); + } + + @Override + public void createTable(Session session, String catalogName, TableMetadata tableMetadata) + { + ConnectorMetadataEntry connectorMetadata = connectorsByCatalog.get(catalogName); + checkArgument(connectorMetadata != null, "Catalog %s does not exist", catalogName); + + connectorMetadata.getMetadata().createTable(session.toConnectorSession(connectorMetadata.getCatalog()), + tableMetadata.getMetadata()); + } + + @Override + public void renameTable(Session session, TableHandle tableHandle, QualifiedTableName newTableName) + { + String catalogName = newTableName.getCatalogName(); + ConnectorMetadataEntry target = connectorsByCatalog.get(catalogName); + if (target == null) { + throw new PrestoException(NOT_FOUND, format("Target catalog '%s' does not exist", catalogName)); + } + if (!tableHandle.getConnectorId().equals(target.getConnectorId())) { + throw new PrestoException(SYNTAX_ERROR, "Cannot rename tables across catalogs"); + } + + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().renameTable(session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle(), newTableName.asSchemaTableName()); + } + + @Override + public void renameColumn(Session session, TableHandle tableHandle, ColumnHandle source, String target) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().renameColumn(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle(), source, target.toLowerCase(ENGLISH)); + } + + @Override + public void dropTable(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().dropTable(session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle()); + } + + @Override + public OutputTableHandle beginCreateTable(Session session, String catalogName, TableMetadata tableMetadata) + { + ConnectorMetadataEntry connectorMetadata = connectorsByCatalog.get(catalogName); + checkArgument(connectorMetadata != null, "Catalog %s does not exist", catalogName); + ConnectorSession connectorSession = session.toConnectorSession(connectorMetadata.getCatalog()); + ConnectorOutputTableHandle handle = connectorMetadata.getMetadata().beginCreateTable(connectorSession, tableMetadata.getMetadata()); + return new OutputTableHandle(connectorMetadata.getConnectorId(), handle); + } + + @Override + public void commitCreateTable(Session session, OutputTableHandle tableHandle, Collection fragments) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().commitCreateTable(session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle(), fragments); + } + + @Override + public void rollbackCreateTable(Session session, OutputTableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().rollbackCreateTable(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle()); + } + + @Override + public InsertTableHandle beginInsert(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + ConnectorInsertTableHandle handle = entry.getMetadata().beginInsert( + 
session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle()); + return new InsertTableHandle(tableHandle.getConnectorId(), handle); + } + + @Override + public void commitInsert(Session session, InsertTableHandle tableHandle, Collection fragments) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().commitInsert(session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle(), fragments); + } + + @Override + public void rollbackInsert(Session session, InsertTableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().rollbackInsert(session.toConnectorSession(entry.getCatalog()), tableHandle.getConnectorHandle()); + } + + @Override + public ColumnHandle getUpdateRowIdColumnHandle(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + return entry.getMetadata().getUpdateRowIdColumnHandle(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle()); + } + + @Override + public TableHandle beginDelete(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + ConnectorTableHandle newHandle = entry.getMetadata().beginDelete(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle()); + return new TableHandle(tableHandle.getConnectorId(), newHandle); + } + + @Override + public void commitDelete(Session session, TableHandle tableHandle, Collection fragments) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().commitDelete(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle(), fragments); + } + + @Override + public void rollbackDelete(Session session, TableHandle tableHandle) + { + ConnectorMetadataEntry entry = lookupConnectorFor(tableHandle); + entry.getMetadata().rollbackDelete(session.toConnectorSession(entry.getCatalog()), + tableHandle.getConnectorHandle()); + } + + @Override + public Map getCatalogNames() + { + ImmutableMap.Builder catalogsMap = ImmutableMap.builder(); + for (Map.Entry entry : connectorsByCatalog.entrySet()) { + catalogsMap.put(entry.getKey(), entry.getValue().getConnectorId()); + } + return catalogsMap.build(); + } + + @Override + public List listViews(Session session, QualifiedTablePrefix prefix) + { + checkNotNull(prefix, "prefix is null"); + + String schemaNameOrNull = prefix.getSchemaName().orElse(null); + Set views = new LinkedHashSet<>(); + for (ConnectorMetadataEntry entry : allConnectorsFor(prefix.getCatalogName())) { + ConnectorSession connectorSession = session.toConnectorSession(entry.getCatalog()); + for (QualifiedTableName tableName : transform(entry.getMetadata().listViews(connectorSession, schemaNameOrNull), convertFromSchemaTableName(prefix.getCatalogName()))) { + views.add(tableName); + } + } + return ImmutableList.copyOf(views); + } + + @Override + public Map getViews(Session session, QualifiedTablePrefix prefix) + { + checkNotNull(prefix, "prefix is null"); + SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix(); + + Map views = new LinkedHashMap<>(); + for (ConnectorMetadataEntry metadata : allConnectorsFor(prefix.getCatalogName())) { + ConnectorSession connectorSession = session.toConnectorSession(metadata.getCatalog()); + for (Entry entry : metadata.getMetadata().getViews(connectorSession, tablePrefix).entrySet()) { + QualifiedTableName viewName = new QualifiedTableName( + 
prefix.getCatalogName(), + entry.getKey().getSchemaName(), + entry.getKey().getTableName()); + views.put(viewName, deserializeView(entry.getValue())); + } + } + return ImmutableMap.copyOf(views); + } + + @Override + public Optional getView(Session session, QualifiedTableName viewName) + { + ConnectorMetadataEntry entry = getConnectorFor(viewName); + if (entry != null) { + SchemaTablePrefix prefix = viewName.asSchemaTableName().toSchemaTablePrefix(); + Map views = entry.getMetadata().getViews(session.toConnectorSession(entry.getCatalog()), prefix); + String view = views.get(viewName.asSchemaTableName()); + if (view != null) { + return Optional.of(deserializeView(view)); + } + } + return Optional.empty(); + } + + @Override + public void createView(Session session, QualifiedTableName viewName, String viewData, boolean replace) + { + ConnectorMetadataEntry connectorMetadata = connectorsByCatalog.get(viewName.getCatalogName()); + checkArgument(connectorMetadata != null, "Catalog %s does not exist", viewName.getCatalogName()); + connectorMetadata.getMetadata().createView(session.toConnectorSession(connectorMetadata.getCatalog()), viewName.asSchemaTableName(), viewData, replace); + } + + @Override + public void dropView(Session session, QualifiedTableName viewName) + { + ConnectorMetadataEntry connectorMetadata = connectorsByCatalog.get(viewName.getCatalogName()); + checkArgument(connectorMetadata != null, "Catalog %s does not exist", viewName.getCatalogName()); + connectorMetadata.getMetadata().dropView(session.toConnectorSession(connectorMetadata.getCatalog()), + viewName.asSchemaTableName()); + } + + @Override + public FunctionRegistry getFunctionRegistry() + { + throw new UnsupportedOperationException(); + } + + @Override + public TypeManager getTypeManager() + { + return typeManager; + } + + @Override + public BlockEncodingSerde getBlockEncodingSerde() + { + throw new UnsupportedOperationException(); + } + + @Override + public SessionPropertyManager getSessionPropertyManager() + { + return sessionPropertyManager; + } + + @Override + public TablePropertyManager getTablePropertyManager() + { + return tablePropertyManager; + } + + private ViewDefinition deserializeView(String data) + { + try { + return viewCodec.fromJson(data); + } + catch (IllegalArgumentException e) { + throw new PrestoException(INVALID_VIEW, "Invalid view JSON: " + data, e); + } + } + + private List allConnectorsFor(String catalogName) + { + ImmutableList.Builder builder = ImmutableList.builder(); + + ConnectorMetadataEntry entry = informationSchemasByCatalog.get(catalogName); + if (entry != null) { + builder.add(entry); + } + + ConnectorMetadataEntry systemTables = systemTablesByCatalog.get(catalogName); + if (systemTables != null) { + builder.add(systemTables); + } + + ConnectorMetadataEntry connector = connectorsByCatalog.get(catalogName); + if (connector != null) { + builder.add(connector); + } + + return builder.build(); + } + + private ConnectorMetadataEntry getConnectorFor(QualifiedTableName name) + { + String catalog = name.getCatalogName(); + String schema = name.getSchemaName(); + + if (schema.equals(INFORMATION_SCHEMA_NAME)) { + return informationSchemasByCatalog.get(catalog); + } + + ConnectorMetadataEntry entry = systemTablesByCatalog.get(catalog); + if ((entry != null) && (entry.getMetadata().getTableHandle(null, name.asSchemaTableName()) != null)) { + return entry; + } + + return connectorsByCatalog.get(catalog); + } + + private ConnectorMetadataEntry lookupConnectorFor(TableHandle tableHandle) + { + return 
getConnectorMetadata(tableHandle.getConnectorId());
+    }
+
+    private ConnectorMetadataEntry lookupConnectorFor(OutputTableHandle tableHandle)
+    {
+        return getConnectorMetadata(tableHandle.getConnectorId());
+    }
+
+    private ConnectorMetadataEntry lookupConnectorFor(InsertTableHandle tableHandle)
+    {
+        return getConnectorMetadata(tableHandle.getConnectorId());
+    }
+
+    private ConnectorMetadataEntry getConnectorMetadata(String connectorId)
+    {
+        ConnectorMetadataEntry result = connectorsById.get(connectorId);
+        checkArgument(result != null, "No connector for connector ID: %s", connectorId);
+        return result;
+    }
+
+    private static class ConnectorMetadataEntry
+    {
+        private final String connectorId;
+        private final String catalog;
+        private final ConnectorMetadata metadata;
+
+        private ConnectorMetadataEntry(String connectorId, String catalog, ConnectorMetadata metadata)
+        {
+            this.connectorId = checkNotNull(connectorId, "connectorId is null");
+            this.catalog = checkNotNull(catalog, "catalog is null");
+            this.metadata = checkNotNull(metadata, "metadata is null");
+        }
+
+        private String getConnectorId()
+        {
+            return connectorId;
+        }
+
+        private String getCatalog()
+        {
+            return catalog;
+        }
+
+        private ConnectorMetadata getMetadata()
+        {
+            return metadata;
+        }
+    }
+
+    private static JsonCodec<ViewDefinition> createTestingViewCodec()
+    {
+        ObjectMapperProvider provider = new ObjectMapperProvider();
+        provider.setJsonDeserializers(ImmutableMap.<Class<?>, JsonDeserializer<?>>of(Type.class, new TypeDeserializer(new TypeRegistry())));
+        return new JsonCodecFactory(provider).jsonCodec(ViewDefinition.class);
+    }
+
+
+    //////////////////////////////////////////////////////////////////////////////////////////////////
+
+    /**
+     * NETFLIX addition
+     */
+    public synchronized void flush(String catalogName){
+        informationSchemasByCatalog.remove(catalogName);
+        connectorsByCatalog.remove(catalogName);
+        connectorsById.remove(catalogName);
+    }
+
+    /**
+     * NETFLIX addition
+     */
+    public synchronized void flushAll(){
+        informationSchemasByCatalog.clear();
+        connectorsByCatalog.clear();
+        connectorsById.clear();
+    }
+
+    /**
+     * Creates a schema named after the schema set on the given session.
+     * @param session session carrying the catalog and schema names
+     * @param schemaMetadata metadata of the schema to create
+     */
+    public void createSchema(Session session, ConnectorSchemaMetadata schemaMetadata) {
+        String schemaName = session.getSchema();
+        checkArgument(schemaName != null, "Schema cannot be null");
+        ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog());
+
+        ConnectorMetadata metadata = entry.getMetadata();
+        if (!(metadata instanceof ConnectorDetailMetadata)) {
+            throw new PrestoException(NOT_SUPPORTED,
+                    "Create schema not supported for connector " + entry.getConnectorId());
+        }
+
+        ConnectorDetailMetadata detailMetadata = (ConnectorDetailMetadata) metadata;
+        detailMetadata.createSchema(session.toConnectorSession(), schemaMetadata);
+    }
+
+    /**
+     * Updates the schema named after the schema set on the given session.
+     * @param session session carrying the catalog and schema names
+     * @param schemaMetadata updated metadata of the schema
+     */
+    public void updateSchema(Session session, ConnectorSchemaMetadata schemaMetadata) {
+        String schemaName = session.getSchema();
+        checkArgument(schemaName != null, "Schema cannot be null");
+        ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog());
+
+        ConnectorMetadata metadata = entry.getMetadata();
+        if (!(metadata instanceof ConnectorDetailMetadata)) {
+            throw new PrestoException(NOT_SUPPORTED,
+                    "Update schema not supported for connector " + entry.getConnectorId());
+        }
+
+        ConnectorDetailMetadata detailMetadata = (ConnectorDetailMetadata) metadata;
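+        // ConnectorDetailMetadata is the Netflix-specific extension of the Presto
+        // ConnectorMetadata SPI; connectors that do not implement it were rejected
+        // above with NOT_SUPPORTED, so the downcast is safe at this point.
+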
detailMetadata.updateSchema(session.toConnectorSession(), schemaMetadata); + } + + /** + * Drop a schema with the given schemaName + */ + public void dropSchema(Session session) { + String schemaName = session.getSchema(); + checkArgument(schemaName != null, "Schema cannot be null"); + ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog()); + + ConnectorMetadata metadata = entry.getMetadata(); + if (!(metadata instanceof ConnectorDetailMetadata)) { + throw new PrestoException(NOT_SUPPORTED, + "Drop schema not supported for connector " + entry.getConnectorId()); + } + + ConnectorDetailMetadata detailMetadata = (ConnectorDetailMetadata) metadata; + detailMetadata.dropSchema(session.toConnectorSession(), schemaName); + } + + /** + * Return a schema with the given schemaName + */ + public ConnectorSchemaMetadata getSchema(Session session) { + String schemaName = session.getSchema(); + checkArgument(schemaName != null, "Schema cannot be null"); + ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog()); + ConnectorSchemaMetadata result; + ConnectorMetadata metadata = entry.getMetadata(); + if (metadata instanceof ConnectorDetailMetadata) { + result = ((ConnectorDetailMetadata) metadata).getSchema(session.toConnectorSession(), schemaName); + } else { + result = new ConnectorSchemaMetadata(schemaName); + } + + return result; + } + + /** + * Updates a table using the specified table metadata. + */ + public ConnectorTableHandle alterTable(Session session, TableMetadata tableMetadata) { + ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog()); + + ConnectorMetadata metadata = entry.getMetadata(); + if (!(metadata instanceof ConnectorDetailMetadata)) { + throw new PrestoException(NOT_SUPPORTED, + "Alter table not supported for connector " + entry.getConnectorId()); + } + + ConnectorDetailMetadata detailMetadata = (ConnectorDetailMetadata) metadata; + return detailMetadata.alterTable(session.toConnectorSession(), tableMetadata.getMetadata()); + } + + public List getTableNames(Session session, String uri, boolean prefixSearch){ + ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog()); + + ConnectorMetadata metadata = entry.getMetadata(); + if( metadata instanceof ConnectorDetailMetadata){ + ConnectorDetailMetadata detailMetadata = (ConnectorDetailMetadata) metadata; + return detailMetadata.getTableNames(uri, prefixSearch); + } + return Lists.newArrayList(); + } + + public List listTableMetadatas(Session session, String schemaName, List tableNames){ + ConnectorMetadataEntry entry = validateCatalogName(session.getCatalog()); + ConnectorMetadata metadata = entry.getMetadata(); + if( metadata instanceof ConnectorDetailMetadata){ + List result = Lists.newArrayList(); + ConnectorDetailMetadata detailMetadata = (ConnectorDetailMetadata) metadata; + List cdm = detailMetadata + .listTableMetadatas(session.toConnectorSession(), schemaName, tableNames); + if( cdm != null){ + + cdm.forEach( + connectorTableMetadata -> result.add( new TableMetadata(session.getCatalog(), connectorTableMetadata))); + } + return result; + } else { + return tableNames.stream().map(tableName -> { + TableMetadata result = null; + Optional tableHandle = getTableHandle(session, new QualifiedTableName(session.getCatalog(), schemaName, tableName)); + if( tableHandle.isPresent()){ + result = getTableMetadata(session, tableHandle.get()); + } + return result; + }).filter(tableMetadata -> tableMetadata != null).collect(Collectors.toList()); + } + } + + public 
ConnectorMetadataEntry validateCatalogName(String catalogName) { + ConnectorMetadataEntry connectorMetadata = connectorsByCatalog.get(catalogName); + if (connectorMetadata == null) { + throw new CatalogNotFoundException(catalogName); + } + + return connectorMetadata; + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/PageSinkManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/PageSinkManager.java new file mode 100644 index 000000000..136038195 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/PageSinkManager.java @@ -0,0 +1,75 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.metacat.main.presto.split; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.InsertTableHandle; +import com.facebook.presto.metadata.OutputTableHandle; +import com.facebook.presto.spi.ConnectorPageSink; +import com.facebook.presto.spi.ConnectorPageSinkProvider; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.split.PageSinkProvider; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static com.google.common.base.Preconditions.checkArgument; + +public class PageSinkManager + implements PageSinkProvider +{ + private final ConcurrentMap pageSinkProviders = new ConcurrentHashMap<>(); + + public void addConnectorPageSinkProvider(String connectorId, ConnectorPageSinkProvider connectorPageSinkProvider) + { + pageSinkProviders.put(connectorId, connectorPageSinkProvider); + } + + @Override + public ConnectorPageSink createPageSink(Session session, OutputTableHandle tableHandle) + { + // assumes connectorId and catalog are the same + ConnectorSession connectorSession = session.toConnectorSession(tableHandle.getConnectorId()); + return providerFor(tableHandle.getConnectorId()).createPageSink(connectorSession, tableHandle.getConnectorHandle()); + } + + @Override + public ConnectorPageSink createPageSink(Session session, InsertTableHandle tableHandle) + { + // assumes connectorId and catalog are the same + ConnectorSession connectorSession = session.toConnectorSession(tableHandle.getConnectorId()); + return providerFor(tableHandle.getConnectorId()).createPageSink(connectorSession, tableHandle.getConnectorHandle()); + } + + private ConnectorPageSinkProvider providerFor(String connectorId) + { + ConnectorPageSinkProvider provider = pageSinkProviders.get(connectorId); + checkArgument(provider != null, "No page sink provider for connector '%s'", connectorId); + return provider; + } + + /** + * NETFLIX addition + */ + public synchronized void flush(String catalogName){ + pageSinkProviders.remove(catalogName); + } + + /** + * NETFLIX addition + */ + public synchronized void flushAll(){ + pageSinkProviders.clear(); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/PageSourceManager.java 
b/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/PageSourceManager.java
new file mode 100644
index 000000000..9233ea15b
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/PageSourceManager.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.netflix.metacat.main.presto.split;
+
+import com.facebook.presto.Session;
+import com.facebook.presto.metadata.Split;
+import com.facebook.presto.spi.ColumnHandle;
+import com.facebook.presto.spi.ConnectorPageSource;
+import com.facebook.presto.spi.ConnectorPageSourceProvider;
+import com.facebook.presto.spi.ConnectorSession;
+import com.facebook.presto.split.PageSourceProvider;
+
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+public class PageSourceManager
+        implements PageSourceProvider
+{
+    private final ConcurrentMap<String, ConnectorPageSourceProvider> pageSourceProviders = new ConcurrentHashMap<>();
+
+    public void addConnectorPageSourceProvider(String connectorId, ConnectorPageSourceProvider connectorPageSourceProvider)
+    {
+        pageSourceProviders.put(connectorId, connectorPageSourceProvider);
+    }
+
+    @Override
+    public ConnectorPageSource createPageSource(Session session, Split split, List<ColumnHandle> columns)
+    {
+        checkNotNull(split, "split is null");
+        checkNotNull(columns, "columns is null");
+
+        // assumes connectorId and catalog are the same
+        ConnectorSession connectorSession = session.toConnectorSession(split.getConnectorId());
+        return getPageSourceProvider(split).createPageSource(connectorSession, split.getConnectorSplit(), columns);
+    }
+
+    private ConnectorPageSourceProvider getPageSourceProvider(Split split)
+    {
+        ConnectorPageSourceProvider provider = pageSourceProviders.get(split.getConnectorId());
+
+        checkArgument(provider != null, "No page source provider for '%s'", split.getConnectorId());
+
+        return provider;
+    }
+
+    /**
+     * NETFLIX addition
+     */
+    public synchronized void flush(String catalogName){
+        pageSourceProviders.remove(catalogName);
+    }
+
+    /**
+     * NETFLIX addition
+     */
+    public synchronized void flushAll(){
+        pageSourceProviders.clear();
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/SplitManager.java b/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/SplitManager.java
new file mode 100644
index 000000000..f25f69e8f
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/presto/split/SplitManager.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.metacat.main.presto.split; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.LegacyTableLayoutHandle; +import com.facebook.presto.metadata.TableHandle; +import com.facebook.presto.metadata.TableLayoutHandle; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionResult; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.ConnectorSplit; +import com.facebook.presto.spi.ConnectorSplitDetailManager; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.ConnectorSplitSource; +import com.facebook.presto.spi.FixedSplitSource; +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.SavePartitionResult; +import com.facebook.presto.spi.SchemaTablePartitionName; +import com.facebook.presto.spi.Sort; +import com.facebook.presto.spi.TupleDomain; +import com.facebook.presto.split.ConnectorAwareSplitSource; +import com.facebook.presto.split.SplitSource; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; + +public class SplitManager extends com.facebook.presto.split.SplitManager +{ + private final ConcurrentMap splitManagers = new ConcurrentHashMap<>(); + + public void addConnectorSplitManager(String connectorId, ConnectorSplitManager connectorSplitManager) + { + checkState(splitManagers.putIfAbsent(connectorId, connectorSplitManager) == null, "SplitManager for connector '%s' is already registered", connectorId); + } + + public SplitSource getSplits(Session session, TableLayoutHandle layout) + { + String connectorId = layout.getConnectorId(); + ConnectorSplitManager splitManager = getConnectorSplitManager(connectorId); + + // assumes connectorId and catalog are the same + ConnectorSession connectorSession = session.toConnectorSession(connectorId); + + ConnectorSplitSource source; + if (layout.getConnectorHandle() instanceof LegacyTableLayoutHandle) { + LegacyTableLayoutHandle handle = (LegacyTableLayoutHandle) layout.getConnectorHandle(); + if (handle.getPartitions().isEmpty()) { + return new ConnectorAwareSplitSource(connectorId, new FixedSplitSource(connectorId, ImmutableList.of())); + } + + source = splitManager.getPartitionSplits(connectorSession, handle.getTable(), handle.getPartitions()); + } + else { + source = splitManager.getSplits(connectorSession, layout.getConnectorHandle()); + } + + return new ConnectorAwareSplitSource(connectorId, source); + } + + public ConnectorSplitManager getConnectorSplitManager(String connectorId) + { + ConnectorSplitManager result = splitManagers.get(connectorId); + checkArgument(result != null, "No split manager for connector '%s'", connectorId); + + return result; + } + + 
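+    // Illustrative usage only (the connector id, split manager instance, session, and
+    // layout handle below are assumed for the example, not defined in this change):
+    //
+    //   SplitManager splitManager = new SplitManager();
+    //   splitManager.addConnectorSplitManager("hive", hiveConnectorSplitManager);
+    //   SplitSource splits = splitManager.getSplits(session, tableLayoutHandle);
+    //
+    // getSplits() keeps the legacy partition-based path for LegacyTableLayoutHandle
+    // layouts and otherwise delegates to the connector's own split manager.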
+    ///////////////////////////////////////////////////////////////////////////////////////
+
+    /**
+     * NETFLIX addition
+     */
+    public synchronized void flush(String catalogName){
+        splitManagers.remove(catalogName);
+    }
+
+    /**
+     * NETFLIX addition
+     */
+    public synchronized void flushAll(){
+        splitManagers.clear();
+    }
+
+    public SavePartitionResult savePartitions(TableHandle table, List<ConnectorPartition> partitions
+            , List<String> partitionIdsForDeletes, boolean checkIfExists){
+        ConnectorSplitManager splitManager = getConnectorSplitManager(table.getConnectorId());
+        if( splitManager instanceof ConnectorSplitDetailManager){
+            return ((ConnectorSplitDetailManager) splitManager).savePartitions(table.getConnectorHandle(), partitions, partitionIdsForDeletes, checkIfExists);
+        } else {
+            throw new UnsupportedOperationException("Operation not supported");
+        }
+    }
+
+    public ConnectorPartitionResult getPartitions(TableHandle table, String filter, List<String> partitionNames, Sort sort, Pageable pageable, boolean includePartitionDetails){
+        ConnectorSplitManager splitManager = getConnectorSplitManager(table.getConnectorId());
+        if( splitManager instanceof ConnectorSplitDetailManager){
+            return ((ConnectorSplitDetailManager) splitManager).getPartitions( table.getConnectorHandle(), filter, partitionNames, sort, pageable, includePartitionDetails);
+        } else {
+            throw new UnsupportedOperationException("Operation not supported");
+        }
+    }
+
+    public Integer getPartitionCount(Session session, TableHandle table){
+        ConnectorSplitManager splitManager = getConnectorSplitManager(table.getConnectorId());
+        if( splitManager instanceof ConnectorSplitDetailManager){
+            return ((ConnectorSplitDetailManager) splitManager).getPartitionCount(table.getConnectorHandle());
+        } else {
+            return splitManager.getPartitions(session.toConnectorSession(), table.getConnectorHandle(), TupleDomain.all()).getPartitions().size();
+        }
+    }
+
+    public void deletePartitions(TableHandle table, List<String> partitionIds){
+        ConnectorSplitManager splitManager = getConnectorSplitManager(table.getConnectorId());
+        if( splitManager instanceof ConnectorSplitDetailManager){
+            ((ConnectorSplitDetailManager) splitManager).deletePartitions( table.getConnectorHandle(), partitionIds);
+        } else {
+            throw new UnsupportedOperationException("Operation not supported");
+        }
+    }
+
+    public List<SchemaTablePartitionName> getPartitionNames(Session session, String uri, boolean prefixSearch){
+        ConnectorSplitManager splitManager = getConnectorSplitManager(session.getCatalog());
+        if( splitManager instanceof ConnectorSplitDetailManager){
+            ConnectorSplitDetailManager splitDetailManager = (ConnectorSplitDetailManager) splitManager;
+            return splitDetailManager.getPartitionNames(uri, prefixSearch);
+        }
+        return Lists.newArrayList();
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogService.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogService.java
new file mode 100644
index 000000000..686b48023
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/CatalogService.java
@@ -0,0 +1,24 @@
+package com.netflix.metacat.main.services;
+
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.CatalogDto;
+import com.netflix.metacat.common.dto.CatalogMappingDto;
+
+import javax.annotation.Nonnull;
+import java.util.List;
+
+public interface CatalogService {
+    /**
+     * @return the information about the given catalog
+     * @throws javax.ws.rs.NotFoundException if the catalog is not found
+     */
+    @Nonnull
+    CatalogDto get(@Nonnull QualifiedName name);
+
+    /**
+     * @return all of the registered catalogs
+     * @throws javax.ws.rs.NotFoundException if there are no registered catalogs
+     */
+    @Nonnull
+    List<CatalogMappingDto> getCatalogNames();
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/DatabaseService.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/DatabaseService.java
new file mode 100644
index 000000000..c43591021
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/DatabaseService.java
@@ -0,0 +1,17 @@
+package com.netflix.metacat.main.services;
+
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.DatabaseCreateRequestDto;
+import com.netflix.metacat.common.dto.DatabaseDto;
+
+public interface DatabaseService {
+    void create(QualifiedName name, DatabaseCreateRequestDto databaseCreateRequestDto);
+
+    void update(QualifiedName name, DatabaseCreateRequestDto databaseCreateRequestDto);
+
+    void delete(QualifiedName name);
+
+    DatabaseDto get(QualifiedName name, boolean includeUserMetadata);
+
+    boolean exists(QualifiedName name);
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/MViewService.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/MViewService.java
new file mode 100644
index 000000000..4fc848dc0
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/MViewService.java
@@ -0,0 +1,40 @@
+package com.netflix.metacat.main.services;
+
+import com.facebook.presto.spi.Pageable;
+import com.facebook.presto.spi.Sort;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.netflix.metacat.common.NameDateDto;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
+import com.netflix.metacat.common.dto.TableDto;
+
+import javax.annotation.Nonnull;
+import java.util.List;
+import java.util.Optional;
+
+public interface MViewService {
+    /**
+     * Creates the view and returns the newly created view.
+     * @param name name of the origin table
+     * @return view
+     */
+    TableDto create(@Nonnull QualifiedName name);
+
+    /**
+     * Deletes the view and returns the deleted view.
+     * @param name name of the view to be deleted
+     * @return deleted view
+     */
+    TableDto delete(@Nonnull QualifiedName name);
+    void update(@Nonnull QualifiedName name, @Nonnull TableDto tableDto);
+    Optional<TableDto> get(@Nonnull QualifiedName name);
+    void snapshotPartitions(@Nonnull QualifiedName name, String filter);
+    PartitionsSaveResponseDto savePartitions(@Nonnull QualifiedName name, List<PartitionDto> partitionDtos, List<String> partitionIdsForDeletes, boolean merge, boolean checkIfExists);
+    void deletePartitions(@Nonnull QualifiedName name, List<String> partitionIds);
+    List<PartitionDto> listPartitions(@Nonnull QualifiedName name, String filter, List<String> partitionNames, Sort sort, Pageable pageable, boolean includeUserMetadata, boolean includePartitionDetails);
+    Integer partitionCount(@Nonnull QualifiedName name);
+    List<NameDateDto> list(@Nonnull QualifiedName qualifiedName);
+    void saveMetadata(@Nonnull QualifiedName name, ObjectNode definitionMetadata, ObjectNode dataMetadata);
+    void rename(QualifiedName name, QualifiedName newViewName);
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/PartitionService.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/PartitionService.java
new file mode 100644
index 000000000..3a780730e
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/PartitionService.java
@@ -0,0 +1,17 @@
+package com.netflix.metacat.main.services;
+
+import com.facebook.presto.spi.Pageable;
+import com.facebook.presto.spi.Sort;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
+
+import java.util.List;
+
+public interface PartitionService {
+    List<PartitionDto> list(QualifiedName name, String filter, List<String> partitionNames, Sort sort, Pageable pageable, boolean includeUserDefinitionMetadata, boolean includeUserDataMetadata, boolean includePartitionDetails);
+    Integer count(QualifiedName name);
+    PartitionsSaveResponseDto save(QualifiedName name, List<PartitionDto> partitionDtos, List<String> partitionIdsForDeletes, boolean checkIfExists);
+    void delete(QualifiedName name, List<String> partitionIds);
+    List<QualifiedName> getQualifiedNames(String uri, boolean prefixSearch);
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/ServicesModule.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/ServicesModule.java
new file mode 100644
index 000000000..f5828e7cb
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/ServicesModule.java
@@ -0,0 +1,33 @@
+package com.netflix.metacat.main.services;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Scopes;
+import com.netflix.metacat.main.services.impl.CatalogServiceImpl;
+import com.netflix.metacat.main.services.impl.DatabaseServiceImpl;
+import com.netflix.metacat.main.services.impl.MViewServiceImpl;
+import com.netflix.metacat.main.services.impl.PartitionServiceImpl;
+import com.netflix.metacat.main.services.impl.TableServiceImpl;
+import com.netflix.metacat.main.services.search.ElasticSearchClientProvider;
+import com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh;
+import com.netflix.metacat.main.services.search.ElasticSearchUtil;
+import com.netflix.metacat.main.services.search.MetacatEventHandlers;
+import org.elasticsearch.client.Client;
+
+import javax.inject.Singleton;
+
+public class ServicesModule extends AbstractModule {
+    @Override
+    protected void configure() {
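+        // Each service interface is bound to its single implementation in singleton
+        // scope; the bindings under "search" wire the Elasticsearch client, utilities,
+        // refresher, and event handlers.
+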
binder().bind(CatalogService.class).to(CatalogServiceImpl.class).in(Scopes.SINGLETON); + binder().bind(DatabaseService.class).to(DatabaseServiceImpl.class).in(Scopes.SINGLETON); + binder().bind(TableService.class).to(TableServiceImpl.class).in(Scopes.SINGLETON); + binder().bind(PartitionService.class).to(PartitionServiceImpl.class).in(Scopes.SINGLETON); + binder().bind(MViewService.class).to(MViewServiceImpl.class).in(Scopes.SINGLETON); + + //search + bind(Client.class).toProvider(ElasticSearchClientProvider.class).in(Singleton.class); + binder().bind(MetacatEventHandlers.class).in(Singleton.class); + binder().bind(ElasticSearchUtil.class).in(Singleton.class); + binder().bind(ElasticSearchMetacatRefresh.class).in(Singleton.class); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/SessionProvider.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/SessionProvider.java new file mode 100644 index 000000000..951f039b1 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/SessionProvider.java @@ -0,0 +1,48 @@ +package com.netflix.metacat.main.services; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.SessionPropertyManager; +import com.facebook.presto.spi.security.Identity; +import com.google.inject.Inject; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.util.MetacatContextManager; +import com.netflix.metacat.main.connector.MetacatConnectorManager; + +import java.util.Optional; + +import static com.facebook.presto.spi.type.TimeZoneKey.UTC_KEY; +import static java.util.Locale.ENGLISH; + +public class SessionProvider { + @Inject + MetacatConnectorManager metacatConnectorManager; + @Inject + SessionPropertyManager sessionPropertyManager; + + public Session getSession(QualifiedName name) { + if (name.isDatabaseDefinition() || name.isTableDefinition() || name.isPartitionDefinition() || name.isViewDefinition()) { + return getSession(name.getCatalogName(), name.getDatabaseName()); + } else { + return getSession(name.getCatalogName()); + } + } + + private Session getSession(String catalogName) { + return getSession(catalogName, "%"); + } + + private Session getSession(String catalogName, String databaseName) { + String source = metacatConnectorManager.getCatalogConfig(catalogName).getType(); + MetacatContext context = MetacatContextManager.getContext(); + return Session.builder(sessionPropertyManager) + .setIdentity(new Identity(context.getUserName(), Optional.empty())) + .setRemoteUserAddress(context.getClientId()) + .setSource(source) + .setCatalog(catalogName) + .setSchema(databaseName) + .setTimeZoneKey(UTC_KEY) + .setLocale(ENGLISH) + .build(); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/TableService.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/TableService.java new file mode 100644 index 000000000..51bc32aa7 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/TableService.java @@ -0,0 +1,44 @@ +package com.netflix.metacat.main.services; + +import com.facebook.presto.metadata.TableHandle; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.TableDto; + +import javax.annotation.Nonnull; +import java.util.List; +import java.util.Optional; + +public interface TableService { + /** + * Creates the table. 
+ * @param name qualified name of the table + * @param tableDto table metadata + */ + void create(@Nonnull QualifiedName name, @Nonnull TableDto tableDto); + + /** + * Deletes the table. Returns the table metadata of the table deleted. + * @param name qualified name of the table to be deleted + * @return + */ + TableDto delete(@Nonnull QualifiedName name); + + Optional get(@Nonnull QualifiedName name, boolean includeUserMetadata); + + Optional get(@Nonnull QualifiedName name, boolean includeInfo, boolean includeDefinitionMetadata, boolean includeDataMetadata); + + Optional getTableHandle(@Nonnull QualifiedName name); + + void rename(@Nonnull QualifiedName oldName, @Nonnull QualifiedName newName, boolean isMView); + + void update(@Nonnull QualifiedName name, @Nonnull TableDto tableDto); + + TableDto copy(@Nonnull QualifiedName name, @Nonnull QualifiedName targetName); + + TableDto copy(@Nonnull TableDto tableDto, @Nonnull QualifiedName targetName); + + void saveMetadata(@Nonnull QualifiedName name, ObjectNode definitionMetadata, ObjectNode dataMetadata); + + List getQualifiedNames(String uri, boolean prefixSearch); +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/CatalogServiceImpl.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/CatalogServiceImpl.java new file mode 100644 index 000000000..911c59b59 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/CatalogServiceImpl.java @@ -0,0 +1,66 @@ +package com.netflix.metacat.main.services.impl; + +import com.facebook.presto.Session; +import com.google.inject.Inject; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.CatalogDto; +import com.netflix.metacat.common.dto.CatalogMappingDto; +import com.netflix.metacat.common.exception.MetacatNotFoundException; +import com.netflix.metacat.common.usermetadata.UserMetadataService; +import com.netflix.metacat.main.connector.MetacatConnectorManager; +import com.netflix.metacat.main.presto.metadata.MetadataManager; +import com.netflix.metacat.main.services.CatalogService; +import com.netflix.metacat.main.services.SessionProvider; +import com.netflix.metacat.main.spi.MetacatCatalogConfig; + +import javax.annotation.Nonnull; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +public class CatalogServiceImpl implements CatalogService { + @Inject + MetacatConnectorManager metacatConnectorManager; + @Inject + MetadataManager metadataManager; + @Inject + SessionProvider sessionProvider; + @Inject + UserMetadataService userMetadataService; + + @Nonnull + @Override + public CatalogDto get(@Nonnull QualifiedName name) { + Session session = sessionProvider.getSession(name); + + MetacatCatalogConfig config = metacatConnectorManager.getCatalogConfig(name); + + CatalogDto result = new CatalogDto(); + result.setName(name); + result.setType(config.getType()); + result.setDatabases(metadataManager.listSchemaNames(session, name.getCatalogName()) + .stream() + .filter(s -> config.getSchemaBlacklist().isEmpty() || !config.getSchemaBlacklist().contains(s)) + .filter(s -> config.getSchemaWhitelist().isEmpty() || config.getSchemaWhitelist().contains(s)) + .sorted(String.CASE_INSENSITIVE_ORDER) + .collect(Collectors.toList()) + ); + + userMetadataService.populateMetadata(result); + + return result; + } + + @Nonnull + @Override + public List getCatalogNames() { + Map catalogs = metacatConnectorManager.getCatalogs(); + if (catalogs.isEmpty()) { + throw new 
MetacatNotFoundException("Unable to locate any catalogs"); + } + + return catalogs.entrySet().stream() + .map(entry -> new CatalogMappingDto(entry.getKey(), entry.getValue().getType())) + .collect(Collectors.toList()); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DatabaseServiceImpl.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DatabaseServiceImpl.java new file mode 100644 index 000000000..0d72d51b9 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/DatabaseServiceImpl.java @@ -0,0 +1,145 @@ +package com.netflix.metacat.main.services.impl; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.QualifiedTableName; +import com.facebook.presto.metadata.QualifiedTablePrefix; +import com.facebook.presto.spi.ConnectorSchemaMetadata; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaNotFoundException; +import com.facebook.presto.spi.StandardErrorCode; +import com.google.common.collect.ImmutableList; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.CatalogDto; +import com.netflix.metacat.common.dto.DatabaseCreateRequestDto; +import com.netflix.metacat.common.dto.DatabaseDto; +import com.netflix.metacat.common.usermetadata.UserMetadataService; +import com.netflix.metacat.converters.PrestoConverters; +import com.netflix.metacat.main.connector.MetacatConnectorManager; +import com.netflix.metacat.main.presto.metadata.MetadataManager; +import com.netflix.metacat.main.services.CatalogService; +import com.netflix.metacat.main.services.DatabaseService; +import com.netflix.metacat.main.services.SessionProvider; +import com.netflix.metacat.main.spi.MetacatCatalogConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +public class DatabaseServiceImpl implements DatabaseService { + private static final Logger log = LoggerFactory.getLogger(DatabaseServiceImpl.class); + @Inject + CatalogService catalogService; + @Inject + MetacatConnectorManager metacatConnectorManager; + @Inject + MetadataManager metadataManager; + @Inject + PrestoConverters prestoConverters; + @Inject + SessionProvider sessionProvider; + @Inject + UserMetadataService userMetadataService; + + @Override + public void create(QualifiedName name, DatabaseCreateRequestDto databaseCreateRequestDto) { + Session session = validateAndGetSession(name); + log.info("Creating schema {}", name); + metadataManager.createSchema(session, new ConnectorSchemaMetadata(name.getDatabaseName())); + if( databaseCreateRequestDto != null && databaseCreateRequestDto.getDefinitionMetadata() != null){ + log.info("Saving user metadata for schema {}", name); + userMetadataService.saveDefinitionMetadata(name, session.getUser(), Optional.of(databaseCreateRequestDto.getDefinitionMetadata()), true); + } + } + + @Override + public void update(QualifiedName name, DatabaseCreateRequestDto databaseCreateRequestDto) { + Session session = validateAndGetSession(name); + log.info("Updating schema {}", name); + try { + metadataManager.updateSchema(session, new ConnectorSchemaMetadata(name.getDatabaseName())); + } catch(PrestoException e){ + if (e.getErrorCode() != 
StandardErrorCode.NOT_SUPPORTED.toErrorCode()){ + throw e; + } + } + if( databaseCreateRequestDto != null && databaseCreateRequestDto.getDefinitionMetadata() != null){ + log.info("Saving user metadata for schema {}", name); + userMetadataService.saveDefinitionMetadata(name, session.getUser(), Optional.of(databaseCreateRequestDto.getDefinitionMetadata()), true); + } + } + + @Override + public void delete(QualifiedName name) { + Session session = validateAndGetSession(name); + log.info("Dropping schema {}", name); + metadataManager.dropSchema(session); + + // Delete definition metadata if it exists + if (userMetadataService.getDefinitionMetadata(name).isPresent()) { + log.info("Deleting user metadata for schema {}", name); + userMetadataService.deleteDefinitionMetadatas(ImmutableList.of(name)); + } + } + + @Override + public DatabaseDto get(QualifiedName name, boolean includeUserMetadata) { + Session session = validateAndGetSession(name); + MetacatCatalogConfig config = metacatConnectorManager.getCatalogConfig(name.getCatalogName()); + + QualifiedTablePrefix spec = new QualifiedTablePrefix(name.getCatalogName(), name.getDatabaseName()); + List tableNames = metadataManager.listTables(session, spec); + List viewNames = Collections.emptyList(); + if (config.isIncludeViewsWithTables()) { + // TODO JdbcMetadata returns ImmutableList.of() for views. We should change it to fetch views. + viewNames = metadataManager.listViews(session, spec); + } + + // Check to see if schema exists + if( tableNames.isEmpty() && viewNames.isEmpty()){ + if(!exists(name)){ + throw new SchemaNotFoundException(name.getDatabaseName()); + } + } + + ConnectorSchemaMetadata schema = metadataManager.getSchema(session); + + DatabaseDto dto = new DatabaseDto(); + dto.setType(metacatConnectorManager.getCatalogConfig(name).getType()); + dto.setName(name); + dto.setUri(schema.getUri()); + dto.setMetadata(schema.getMetadata()); + dto.setTables( + Stream.concat(tableNames.stream(), viewNames.stream()) + .map(QualifiedTableName::getTableName) + .sorted(String.CASE_INSENSITIVE_ORDER) + .collect(Collectors.toList()) + ); + if( includeUserMetadata) { + log.info("Populate user metadata for schema {}", name); + userMetadataService.populateMetadata(dto); + } + + return dto; + } + + @Override + public boolean exists(QualifiedName name) { + CatalogDto catalogDto = catalogService.get(QualifiedName.ofCatalog(name.getCatalogName())); + return catalogDto.getDatabases().contains(name.getDatabaseName()); + } + + private Session validateAndGetSession(QualifiedName name) { + checkNotNull(name, "name cannot be null"); + checkState(name.isDatabaseDefinition(), "name %s is not for a database", name); + + return sessionProvider.getSession(name); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/MViewServiceImpl.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/MViewServiceImpl.java new file mode 100644 index 000000000..41a5a8c06 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/MViewServiceImpl.java @@ -0,0 +1,317 @@ +package com.netflix.metacat.main.services.impl; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.QualifiedTableName; +import com.facebook.presto.metadata.QualifiedTablePrefix; +import com.facebook.presto.metadata.TableMetadata; +import com.facebook.presto.spi.AuditInfo; +import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.ConnectorTableMetadata; +import 
com.facebook.presto.spi.NotFoundException;
+import com.facebook.presto.spi.Pageable;
+import com.facebook.presto.spi.SchemaTableName;
+import com.facebook.presto.spi.Sort;
+import com.facebook.presto.spi.TableNotFoundException;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import com.netflix.metacat.common.NameDateDto;
+import com.netflix.metacat.common.QualifiedName;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;
+import com.netflix.metacat.common.dto.StorageDto;
+import com.netflix.metacat.common.dto.TableDto;
+import com.netflix.metacat.common.exception.MetacatNotSupportedException;
+import com.netflix.metacat.common.usermetadata.UserMetadataService;
+import com.netflix.metacat.converters.PrestoConverters;
+import com.netflix.metacat.main.presto.metadata.MetadataManager;
+import com.netflix.metacat.main.services.MViewService;
+import com.netflix.metacat.main.services.PartitionService;
+import com.netflix.metacat.main.services.SessionProvider;
+import com.netflix.metacat.main.services.TableService;
+import org.joda.time.Instant;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import javax.inject.Inject;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Created by amajumdar on 4/13/15.
+ */
+public class MViewServiceImpl implements MViewService {
+    private static final Logger log = LoggerFactory.getLogger(MViewServiceImpl.class);
+    public static final String VIEW_DB_NAME = "franklinviews";
+    public static final List<String> SUPPORTED_SOURCES = Lists.newArrayList("hive", "s3", "aegisthus");
+    @Inject
+    MetadataManager metadataManager;
+    @Inject
+    SessionProvider sessionProvider;
+    @Inject
+    TableService tableService;
+    @Inject
+    PartitionService partitionService;
+    @Inject
+    PrestoConverters prestoConverters;
+    @Inject
+    UserMetadataService userMetadataService;
+
+    /**
+     * Creates the materialized view using the schema of the given table.
+     * Assumes that the "franklinviews" database name already exists in the given catalog.
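+     * The backing view table is named [dbName]_[tableName]_[viewName]; see createViewName below.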
+ */ + @Override + public TableDto create(@Nonnull QualifiedName name) { + TableDto result = null; + // Get the table + log.info("Get the table {}", name); + Optional oTable = tableService.get(name, false); + if( oTable.isPresent()){ + TableDto table = oTable.get(); + String viewName = createViewName(name); + QualifiedName targetName = QualifiedName.ofTable( name.getCatalogName(), VIEW_DB_NAME, viewName); + // Get the view table if it exists + log.info("Check if the view table {} exists.", targetName); + Optional oViewTable = Optional.empty(); + try { + oViewTable = tableService.get(targetName, false); + } catch(NotFoundException ignored){ + + } + if( !oViewTable.isPresent()){ + log.info("Creating view {}.", targetName); + result = tableService.copy( table, targetName); + } else { + result = oViewTable.get(); + } + } else { + throw new TableNotFoundException(new SchemaTableName(name.getDatabaseName(), name.getTableName())); + } + return result; + } + + @Override + public TableDto delete(@Nonnull QualifiedName name) { + QualifiedName viewQName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + log.info("Deleting view {}.", viewQName); + return tableService.delete(viewQName); + } + + @Override + public void update( + @Nonnull + QualifiedName name, + @Nonnull + TableDto tableDto) { + QualifiedName viewQName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + log.info("Updating view {}.", viewQName); + tableService.update(viewQName, tableDto); + } + + @Override + public Optional get(@Nonnull QualifiedName name) { + QualifiedName viewQName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + Optional result = tableService.get( viewQName, false); + + // + // User definition metadata of the underlying table is returned + // + if( result.isPresent()){ + TableDto table = result.get(); + table.setName(name); + QualifiedName tableName = QualifiedName.ofTable( name.getCatalogName(), name.getDatabaseName(), name.getTableName()); + Optional definitionMetadata = userMetadataService.getDefinitionMetadata(tableName); + if( definitionMetadata.isPresent()){ + userMetadataService.populateMetadata( table, definitionMetadata.get(), null); + } + } + return result; + } + + @Override + public void snapshotPartitions( @Nonnull QualifiedName name, String filter) { + List partitionDtos = partitionService.list(name, filter, null, null, null, false, false, true); + if( partitionDtos != null && !partitionDtos.isEmpty()) { + log.info("Snapshot partitions({}) for view {}.", partitionDtos.size(), name); + savePartitions(name, partitionDtos, null, false, true); + } + } + + @Override + public PartitionsSaveResponseDto savePartitions( @Nonnull QualifiedName name, List partitionDtos + , List partitionIdsForDeletes, boolean merge, boolean checkIfExists) { + if( partitionDtos == null || partitionDtos.isEmpty()){ + return new PartitionsSaveResponseDto(); + } + QualifiedName viewQName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + partitionDtos.forEach(partitionDto -> + partitionDto.setName(QualifiedName + .ofPartition(viewQName.getCatalogName(), viewQName.getDatabaseName(), viewQName.getTableName(), + partitionDto.getName().getPartitionName()))); + if( merge) { + List partitionNames = partitionDtos.stream().map( + partitionDto -> partitionDto.getName().getPartitionName()).collect(Collectors.toList()); + List existingPartitions = partitionService.list(viewQName, null, partitionNames, null, 
null, false, false, false); + Map existingPartitionsMap = existingPartitions.stream() + .collect(Collectors + .toMap(partitionDto -> partitionDto.getName().getPartitionName(), Function.identity())); + List mergedPartitions = partitionDtos.stream() + .map(partitionDto -> { + String partitionName = partitionDto.getName().getPartitionName(); + PartitionDto existingPartition = existingPartitionsMap.get(partitionName); + return mergePartition( partitionDto, existingPartition); + }).collect(Collectors.toList()); + return partitionService.save( viewQName, mergedPartitions, partitionIdsForDeletes, checkIfExists); + } else { + return partitionService.save( viewQName, partitionDtos, partitionIdsForDeletes, checkIfExists); + } + } + + private PartitionDto mergePartition(PartitionDto partitionDto, PartitionDto existingPartition) { + if( existingPartition != null){ + StorageDto existingSerde = existingPartition.getSerde(); + if( existingSerde != null) { + StorageDto serde = partitionDto.getSerde(); + if (serde == null) { + serde = new StorageDto(); + partitionDto.setSerde(serde); + } + if (serde.getUri() == null || serde.getUri().equals(existingSerde.getUri())) { + serde.setUri(existingSerde.getUri()); + if( serde.getInputFormat() == null){ + serde.setInputFormat( existingSerde.getInputFormat()); + } + if( serde.getOutputFormat() == null){ + serde.setOutputFormat(existingSerde.getOutputFormat()); + } + if( serde.getSerializationLib() == null){ + serde.setSerializationLib(existingSerde.getSerializationLib()); + } + } + } + } + return partitionDto; + } + + @Override + public void deletePartitions( @Nonnull QualifiedName name, List partitionIds) { + QualifiedName viewQName = QualifiedName.ofTable( name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + partitionService.delete( viewQName, partitionIds); + } + + @Override + public List listPartitions(@Nonnull QualifiedName name, String filter, List partitionNames, Sort sort, + Pageable pageable, boolean includeUserMetadata, boolean includePartitionDetails) { + QualifiedName viewQName = QualifiedName.ofTable( name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + return partitionService.list( viewQName, filter, partitionNames, sort, pageable, includeUserMetadata, includeUserMetadata, includePartitionDetails); + } + + @Override + public Integer partitionCount(QualifiedName name) { + QualifiedName viewQName = QualifiedName.ofTable(name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + return partitionService.count(viewQName); + } + + @Override + public List list(@Nonnull QualifiedName name) { + QualifiedName viewDbName = QualifiedName.ofDatabase(name.getCatalogName(), VIEW_DB_NAME); + QualifiedTablePrefix viewDbNamePrefix = new QualifiedTablePrefix(name.getCatalogName(), VIEW_DB_NAME); + // Get the session + Session session = sessionProvider.getSession( viewDbName); + List tableNames = Lists.newArrayList(); + try{ + tableNames = metadataManager.listTables( session, viewDbNamePrefix); + } catch(Exception ignored){ + // ignore. 
Return an empty list if database 'franklinviews' does not exist + } + if( !name.isDatabaseDefinition() && name.isCatalogDefinition()){ + return tableNames.stream() + .map(prestoConverters::toQualifiedName) + .map(viewName -> { + NameDateDto dto = new NameDateDto(); + dto.setName(viewName); + return dto; + }) + .collect(Collectors.toList()); + } else { + final String prefix = String.format("%s_%s_", name.getDatabaseName(), + MoreObjects.firstNonNull(name.getTableName(), "")); + return tableNames.stream() + .filter(qualifiedTableName -> qualifiedTableName.getTableName().startsWith(prefix)) + .map(qualifiedTableName -> { + NameDateDto dto = new NameDateDto(); + dto.setName(QualifiedName.ofView( qualifiedTableName.getCatalogName(), name.getDatabaseName(), name.getTableName(), qualifiedTableName.getTableName().substring(prefix.length()))); + return dto; + }) + .collect(Collectors.toList()); + } + } + + private NameDateDto createNameDto(TableMetadata table) { + NameDateDto dto = new NameDateDto(); + dto.setName(QualifiedName.ofTable(table.getConnectorId(), table.getTable().getSchemaName(), + table.getTable().getTableName())); + ConnectorTableMetadata connectorTableMetadata = table.getMetadata(); + if( connectorTableMetadata instanceof ConnectorTableDetailMetadata){ + ConnectorTableDetailMetadata ctdm = (ConnectorTableDetailMetadata) connectorTableMetadata; + AuditInfo auditInfo = ctdm.getAuditInfo(); + if( auditInfo != null){ + if( auditInfo.getCreatedDate() != null) { + dto.setCreateDate(new Instant(auditInfo.getCreatedDate()).toDate()); + } + if( auditInfo.getLastUpdatedDate() != null) { + dto.setLastUpdated(new Instant(auditInfo.getLastUpdatedDate()).toDate()); + } + } + } + return dto; + } + + @Override + public void saveMetadata( + @Nonnull + QualifiedName name, ObjectNode definitionMetadata, ObjectNode dataMetadata) { + QualifiedName viewQName = QualifiedName.ofTable( name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + tableService.saveMetadata( viewQName, definitionMetadata, dataMetadata); + } + + @Override + public void rename(QualifiedName name, QualifiedName newViewName) { + QualifiedName oldViewQName = QualifiedName.ofTable( name.getCatalogName(), VIEW_DB_NAME, createViewName(name)); + QualifiedName newViewQName = QualifiedName.ofTable( newViewName.getCatalogName(), VIEW_DB_NAME, createViewName(newViewName)); + tableService.rename(oldViewQName, newViewQName, true); + } + + /** + * Validate the qualified name. + * Validate if the catalog is one of the catalogs that support views. + * Assumes that the "franklinviews" database name already exists in the given catalog. + */ + private Session validateAndGetSession(QualifiedName name) { + checkNotNull(name, "name cannot be null"); + checkState(name.isViewDefinition(), "name %s is not for a view", name); + + if(!Iterables.contains(SUPPORTED_SOURCES, name.getCatalogName())){ + throw new MetacatNotSupportedException(String.format("This catalog (%s) doesn't support views", name.getCatalogName())); + } + return sessionProvider.getSession(name); + } + + /** + * The view is going to be represented by a table in a special db in Franklin. 
As such there must be + * a conversion from view id -> view table id like so: + * [dbName]_[tableName]_[viewName] + */ + private String createViewName(QualifiedName name){ + return String.format("%s_%s_%s", name.getDatabaseName(), name.getTableName(), name.getViewName()); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/PartitionServiceImpl.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/PartitionServiceImpl.java new file mode 100644 index 000000000..705747f48 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/PartitionServiceImpl.java @@ -0,0 +1,212 @@ +package com.netflix.metacat.main.services.impl; + +import com.facebook.presto.Session; +import com.facebook.presto.metadata.TableHandle; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionResult; +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.SavePartitionResult; +import com.facebook.presto.spi.SchemaTablePartitionName; +import com.facebook.presto.spi.Sort; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.base.Splitter; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.HasMetadata; +import com.netflix.metacat.common.dto.PartitionDto; +import com.netflix.metacat.common.dto.PartitionsSaveResponseDto; +import com.netflix.metacat.common.exception.MetacatNotFoundException; +import com.netflix.metacat.common.monitoring.DynamicGauge; +import com.netflix.metacat.common.monitoring.LogConstants; +import com.netflix.metacat.common.usermetadata.UserMetadataService; +import com.netflix.metacat.converters.PrestoConverters; +import com.netflix.metacat.main.presto.split.SplitManager; +import com.netflix.metacat.main.services.CatalogService; +import com.netflix.metacat.main.services.PartitionService; +import com.netflix.metacat.main.services.SessionProvider; +import com.netflix.metacat.main.services.TableService; +import com.netflix.servo.tag.BasicTagList; +import com.netflix.servo.tag.TagList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +public class PartitionServiceImpl implements PartitionService { + private static final Logger log = LoggerFactory.getLogger(PartitionServiceImpl.class); + @Inject + CatalogService catalogService; + @Inject + PrestoConverters prestoConverters; + @Inject + SplitManager splitManager; + @Inject + TableService tableService; + @Inject + UserMetadataService userMetadataService; + @Inject + SessionProvider sessionProvider; + + private ConnectorPartitionResult getPartitionResult(QualifiedName name, String filter, List partitionNames, Sort sort, Pageable pageable, boolean includePartitionDetails) { + ConnectorPartitionResult result = null; + Optional tableHandle = tableService.getTableHandle(name); + if (tableHandle.isPresent()) { + result = splitManager.getPartitions(tableHandle.get(), filter, partitionNames, sort, pageable, includePartitionDetails); + } + return result; + } + + @Override + public List list(QualifiedName name, String filter, List partitionNames, Sort sort + , Pageable pageable, boolean includeUserDefinitionMetadata, boolean includeUserDataMetadata, boolean includePartitionDetails) { + 
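+        // Fetch the matching partitions from the connector first; user definition and
+        // data metadata are joined in below, keyed by partition name and data URI.
+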
ConnectorPartitionResult partitionResult = getPartitionResult(name, filter, partitionNames, sort, pageable, includePartitionDetails); + List result = Collections.emptyList(); + if (partitionResult != null) { + List names = Lists.newArrayList(); + List uris = Lists.newArrayList(); + result = partitionResult.getPartitions().stream() + .map(partition -> { + PartitionDto result1 = toPartitionDto(name, partition ); + names.add( result1.getName()); + uris.add(result1.getDataUri()); + return result1; + }) + .collect(Collectors.toList()); + if(includeUserDefinitionMetadata || includeUserDataMetadata){ + Map dataMetadataMap = includeUserDataMetadata?userMetadataService.getDataMetadataMap(uris): + Maps.newHashMap(); + Map definitionMetadataMap = includeUserDefinitionMetadata?userMetadataService.getDefinitionMetadataMap(names): + Maps.newHashMap(); + result.stream().forEach(partitionDto -> userMetadataService.populateMetadata(partitionDto + , definitionMetadataMap.get(partitionDto.getName().toString()) + , dataMetadataMap.get(partitionDto.getDataUri()))); + } + } + TagList tags = BasicTagList.of("catalog", name.getCatalogName(), "database", name.getDatabaseName(), "table", name.getTableName()); + DynamicGauge.set(LogConstants.GaugeGetPartitionsCount.toString(), tags, result.size()); + log.info("Got {} partitions for {} using filter: {} and partition names: {}", result.size(), name, filter, partitionNames); + return result; + } + + @Override + public Integer count(QualifiedName name) { + Integer result = 0; + Optional tableHandle = tableService.getTableHandle(name); + if (tableHandle.isPresent()) { + Session session = sessionProvider.getSession(name); + result = splitManager.getPartitionCount( session, tableHandle.get()); + } + return result; + } + + @Override + public PartitionsSaveResponseDto save(QualifiedName name, List partitionDtos + , List partitionIdsForDeletes, boolean checkIfExists) { + PartitionsSaveResponseDto result = new PartitionsSaveResponseDto(); + // If no partitions are passed, then return + if( partitionDtos == null || partitionDtos.isEmpty()){ + return result; + } + TagList tags = BasicTagList.of("catalog", name.getCatalogName(), "database", name.getDatabaseName(), "table", + name.getTableName()); + DynamicGauge.set(LogConstants.GaugeAddPartitions.toString(), tags, partitionDtos.size()); + Session session = sessionProvider.getSession(name); + TableHandle tableHandle = tableService.getTableHandle(name).orElseThrow(() -> + new MetacatNotFoundException("Unable to locate " + name)); + List partitions = partitionDtos.stream() + .map(prestoConverters::fromPartitionDto) + .collect(Collectors.toList()); + List deletePartitions = Lists.newArrayList(); + if( partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) { + DynamicGauge.set(LogConstants.GaugeDeletePartitions.toString(), tags, partitionIdsForDeletes.size()); + ConnectorPartitionResult deletePartitionResult = splitManager.getPartitions(tableHandle, null, partitionIdsForDeletes, null, null, false); + deletePartitions = deletePartitionResult.getPartitions().stream() + .map(partition -> toPartitionDto(name, partition )) + .collect(Collectors.toList()); + } + // + // Save all the new and updated partitions + // + log.info("Saving partitions({}) for {}", partitions.size(), name); + SavePartitionResult savePartitionResult = splitManager.savePartitions(tableHandle, partitions, partitionIdsForDeletes, checkIfExists); + + // Save metadata + log.info("Saving user metadata for partitions for {}", name); + 
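+        // User metadata is written only after the partition save above has succeeded,
+        // so a failed partition save does not leave behind metadata for partitions
+        // that were never stored.
+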
userMetadataService.saveMetadatas(session.getUser(), partitionDtos, true); + // delete metadata + if( !deletePartitions.isEmpty()) { + log.info("Deleting user metadata for partitions with names {} for {}", partitionIdsForDeletes, name); + userMetadataService.deleteMetadatas(deletePartitions, false); + } + + result.setUpdated(savePartitionResult.getUpdated()); + result.setAdded(savePartitionResult.getAdded()); + + return result; + } + + private void validatePartitionName(String partitionName, List partitionKeys) { + for (String part : Splitter.on('/').omitEmptyStrings().split(partitionName)) { + if (part.contains("=")) { + String[] values = part.split("=", 2); + + if( partitionKeys == null || !partitionKeys.contains(values[0])){ + throw new IllegalArgumentException(String.format("Partition name %s is invalid", partitionName)); + } + } else { + throw new IllegalArgumentException(String.format("Partition name %s is invalid", partitionName)); + } + } + } + + @Override + public void delete(QualifiedName name, List partitionIds) { + TagList tags = BasicTagList.of("catalog", name.getCatalogName(), "database", name.getDatabaseName(), "table", name.getTableName()); + DynamicGauge.set(LogConstants.GaugeDeletePartitions.toString(), tags, partitionIds.size()); + Optional tableHandle = tableService.getTableHandle(name); + if (tableHandle.isPresent() && !partitionIds.isEmpty()) { + ConnectorPartitionResult partitionResult = splitManager.getPartitions(tableHandle.get(), null, partitionIds, null, null, false); + log.info("Deleting partitions with names {} for {}", partitionIds, name); + splitManager.deletePartitions( tableHandle.get(), partitionIds); + List partitions = partitionResult.getPartitions().stream() + .map(partition -> toPartitionDto(name, partition )) + .collect(Collectors.toList()); + // delete metadata + log.info("Deleting user metadata for partitions with names {} for {}", partitionIds, name); + userMetadataService.deleteMetadatas(partitions, false); + } + } + + @Override + public List getQualifiedNames(String uri, boolean prefixSearch){ + List result = Lists.newArrayList(); + + catalogService.getCatalogNames().stream().forEach(catalog -> { + Session session = sessionProvider.getSession(QualifiedName.ofCatalog(catalog.getCatalogName())); + List schemaTablePartitionNames = splitManager.getPartitionNames( session, uri, prefixSearch); + List qualifiedNames = schemaTablePartitionNames.stream().map( + schemaTablePartitionName -> QualifiedName.ofPartition( catalog.getConnectorName() + , schemaTablePartitionName.getTableName().getSchemaName() + , schemaTablePartitionName.getTableName().getTableName() + , schemaTablePartitionName.getPartitionId())).collect(Collectors.toList()); + result.addAll(qualifiedNames); + }); + return result; + } + + private PartitionDto toPartitionDto(QualifiedName tableName, ConnectorPartition partition) { + QualifiedName partitionName = QualifiedName.ofPartition( + tableName.getCatalogName(), + tableName.getDatabaseName(), + tableName.getTableName(), + partition.getPartitionId() + ); + return prestoConverters.toPartitionDto(partitionName, partition); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/TableServiceImpl.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/TableServiceImpl.java new file mode 100644 index 000000000..90260a771 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/impl/TableServiceImpl.java @@ -0,0 +1,360 @@ +package com.netflix.metacat.main.services.impl; + 
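+
+/*
+ * Editor's sketch (illustrative only, not part of this change): a typical
+ * create/get round trip through this service, assuming an injected TableService
+ * and hypothetical names.
+ *
+ *   TableDto dto = new TableDto();
+ *   dto.setName(QualifiedName.ofTable("prodhive", "reports", "daily_agg"));
+ *   tableService.create(dto.getName(), dto);
+ *   Optional<TableDto> created = tableService.get(dto.getName(), true);
+ */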
+import com.facebook.presto.Session; +import com.facebook.presto.metadata.QualifiedTableName; +import com.facebook.presto.metadata.TableHandle; +import com.facebook.presto.metadata.TableMetadata; +import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.type.TypeManager; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.base.MoreObjects; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.netflix.metacat.common.NameDateDto; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.StorageDto; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.common.exception.MetacatNotSupportedException; +import com.netflix.metacat.common.usermetadata.TagService; +import com.netflix.metacat.common.usermetadata.UserMetadataService; +import com.netflix.metacat.converters.PrestoConverters; +import com.netflix.metacat.main.connector.MetacatConnectorManager; +import com.netflix.metacat.main.presto.metadata.MetadataManager; +import com.netflix.metacat.main.services.DatabaseService; +import com.netflix.metacat.main.services.MViewService; +import com.netflix.metacat.main.services.SessionProvider; +import com.netflix.metacat.main.services.TableService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import javax.inject.Inject; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +public class TableServiceImpl implements TableService { + private static final Logger log = LoggerFactory.getLogger(TableServiceImpl.class); + @Inject + MetacatConnectorManager metacatConnectorManager; + @Inject + MetadataManager metadataManager; + @Inject + PrestoConverters prestoConverters; + @Inject + SessionProvider sessionProvider; + @Inject + UserMetadataService userMetadataService; + @Inject + DatabaseService databaseService; + @Inject + TagService tagService; + @Inject + MViewService mViewService; + private static final String NAME_TAGS = "tags"; + + @Override + public void create(@Nonnull QualifiedName name, @Nonnull TableDto tableDto) { + Session session = validateAndGetSession(name); + // + // Set the owner,if null, with the session user name. 
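+        // (For example, a DTO that arrives without serde.owner, submitted by user
+        // "jdoe", ends up with serde.owner == "jdoe"; the user name is hypothetical.)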
+ // + setOwnerIfNull(tableDto, session.getUser()); + log.info("Creating table {}", name); + metadataManager.createTable( + session, + name.getCatalogName(), + prestoConverters.fromTableDto(name, tableDto, metadataManager.getTypeManager()) + ); + + if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) { + log.info("Saving user metadata for table {}", name); + userMetadataService.saveMetadata(session.getUser(), tableDto, false); + tag( name, tableDto.getDefinitionMetadata()); + } + } + + private void setOwnerIfNull(TableDto tableDto, String user) { + if(!Strings.isNullOrEmpty(user)) { + StorageDto serde = tableDto.getSerde(); + if (serde == null) { + serde = new StorageDto(); + tableDto.setSerde(serde); + } + if (Strings.isNullOrEmpty(serde.getOwner())) { + serde.setOwner(user); + } + } + } + + private void tag(QualifiedName name, ObjectNode definitionMetadata) { + if( definitionMetadata != null && definitionMetadata.get(NAME_TAGS) != null){ + JsonNode tagsNode = definitionMetadata.get(NAME_TAGS); + Set tags = Sets.newHashSet(); + if( tagsNode.isArray() && tagsNode.size() > 0){ + for(JsonNode tagNode: tagsNode){ + tags.add( tagNode.textValue()); + } + log.info("Setting tags {} for table {}", tags, name); + tagService.setTableTags( name, tags, false); + } + } + } + + @Override + public TableDto delete(@Nonnull QualifiedName name) { + Session session = validateAndGetSession(name); + QualifiedTableName tableName = prestoConverters.getQualifiedTableName(name); + + Optional tableHandle = metadataManager.getTableHandle(session, tableName); + Optional oTable = get(name, true); + if (oTable.isPresent()) { + log.info("Drop table {}", name); + metadataManager.dropTable(session, tableHandle.get()); + } + + TableDto tableDto = oTable.orElseGet(() -> { + // If the table doesn't exist construct a blank copy we can use to delete the definitionMetadata and tags + TableDto t = new TableDto(); + t.setName(name); + return t; + }); + + // Delete the metadata. 
Type doesn't matter since we discard the result
+        log.info("Deleting user metadata for table {}", name);
+        userMetadataService.deleteMetadatas(Lists.newArrayList(tableDto), false);
+        log.info("Deleting tags for table {}", name);
+        tagService.delete(name, false);
+
+        return tableDto;
+    }
+
+    @Override
+    public Optional<TableDto> get(@Nonnull QualifiedName name, boolean includeUserMetadata) {
+        // Table info is always loaded; user metadata only when the caller asks for it.
+        return get(name, true, includeUserMetadata, includeUserMetadata);
+    }
+
+    @Override
+    public Optional<TableDto> get(@Nonnull QualifiedName name, boolean includeInfo,
+            boolean includeDefinitionMetadata, boolean includeDataMetadata) {
+        Session session = validateAndGetSession(name);
+        TableDto table;
+        Optional<TableMetadata> tableMetadata = Optional.empty();
+        if (includeInfo) {
+            tableMetadata = Optional.ofNullable(getTableMetadata(name, session));
+            if (!tableMetadata.isPresent()) {
+                return Optional.empty();
+            }
+            String type = metacatConnectorManager.getCatalogConfig(name).getType();
+            table = prestoConverters.toTableDto(name, type, tableMetadata.get());
+        } else {
+            table = new TableDto();
+            table.setName(name);
+        }
+
+        if (includeDefinitionMetadata) {
+            Optional<ObjectNode> definitionMetadata = userMetadataService.getDefinitionMetadata(name);
+            if (definitionMetadata.isPresent()) {
+                table.setDefinitionMetadata(definitionMetadata.get());
+            }
+        }
+
+        if (includeDataMetadata) {
+            if (!tableMetadata.isPresent()) {
+                tableMetadata = Optional.ofNullable(getTableMetadata(name, session));
+            }
+            if (tableMetadata.isPresent()) {
+                ConnectorTableMetadata connectorTableMetadata = tableMetadata.get().getMetadata();
+                if (connectorTableMetadata instanceof ConnectorTableDetailMetadata) {
+                    ConnectorTableDetailMetadata detailMetadata = (ConnectorTableDetailMetadata) connectorTableMetadata;
+                    StorageInfo storageInfo = detailMetadata.getStorageInfo();
+                    if (storageInfo != null) {
+                        Optional<ObjectNode> dataMetadata = userMetadataService.getDataMetadata(storageInfo.getUri());
+                        if (dataMetadata.isPresent()) {
+                            table.setDataMetadata(dataMetadata.get());
+                        }
+                    }
+                }
+            }
+        }
+
+        return Optional.of(table);
+    }
+
+    @Override
+    public Optional<TableHandle> getTableHandle(@Nonnull QualifiedName name) {
+        Session session = validateAndGetSession(name);
+
+        QualifiedTableName qualifiedTableName = prestoConverters.getQualifiedTableName(name);
+        return metadataManager.getTableHandle(session, qualifiedTableName);
+    }
+
+    private TableMetadata getTableMetadata(QualifiedName name, Optional<TableHandle> tableHandle) {
+        if (!tableHandle.isPresent()) {
+            return null;
+        }
+        Session session = validateAndGetSession(name);
+        TableMetadata result = metadataManager.getTableMetadata(session, tableHandle.get());
+        checkState(name.getDatabaseName().equals(result.getTable().getSchemaName()), "Unexpected database");
+        checkState(name.getTableName().equals(result.getTable().getTableName()), "Unexpected table");
+
+        return result;
+    }
+
+    private TableMetadata getTableMetadata(QualifiedName name, Session session) {
+        QualifiedTableName qualifiedTableName = prestoConverters.getQualifiedTableName(name);
+        Optional<TableHandle> tableHandle = metadataManager.getTableHandle(session, qualifiedTableName);
+        return getTableMetadata(name, tableHandle);
+    }
+
+    @Override
+    public void rename(@Nonnull QualifiedName oldName, @Nonnull QualifiedName newName, boolean isMView) {
+        Session session = validateAndGetSession(oldName);
+
+        QualifiedTableName oldPrestoName = prestoConverters.getQualifiedTableName(oldName);
+        QualifiedTableName newPrestoName = prestoConverters.getQualifiedTableName(newName);
+
+        Optional<TableHandle> tableHandle = metadataManager.getTableHandle(session, oldPrestoName);
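+        // Editor's note: a table rename cascades to its materialized views, since view
+        // ids embed the table name ([dbName]_[tableName]_[viewName], cf. createViewName
+        // above). For example, renaming prodhive/db/tbl to prodhive/db/tbl2 re-registers
+        // view prodhive/db/tbl/view1 as prodhive/db/tbl2/view1 (names hypothetical).
+        // Definition metadata and tags are re-keyed below even when the connector does
+        // not support the physical rename.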
if (tableHandle.isPresent()) { + //Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata + try { + log.info("Renaming {} {} to {}", isMView?"view":"table", oldName, newName); + metadataManager.renameTable(session, tableHandle.get(), newPrestoName); + + if( !isMView) { + final String prefix = String.format("%s_%s_", oldName.getDatabaseName(), + MoreObjects.firstNonNull(oldName.getTableName(), "")); + List views = mViewService.list(oldName); + if (views != null && !views.isEmpty()) { + views.forEach(view -> { + QualifiedName newViewName = QualifiedName.ofView(oldName.getCatalogName(), oldName.getDatabaseName(), newName.getTableName(), view.getName().getViewName()); + mViewService.rename(view.getName(), newViewName); + }); + } + } + } catch(PrestoException e){ + if(!NOT_SUPPORTED.toErrorCode().equals(e.getErrorCode())){ + throw e; + } + } + userMetadataService.renameDefinitionMetadataKey(oldName, newName); + tagService.rename(oldName, newName.getTableName()); + } + } + + @Override + public void update(@Nonnull QualifiedName name, @Nonnull TableDto tableDto) { + Session session = validateAndGetSession(name); + + //Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata + try { + TypeManager typeManager = metadataManager.getTypeManager(); + log.info("Updating table {}", name); + metadataManager.alterTable(session, prestoConverters.fromTableDto(name, tableDto, typeManager)); + } catch(PrestoException e){ + if(!NOT_SUPPORTED.toErrorCode().equals(e.getErrorCode())){ + throw e; + } + } + + // Merge in metadata if the user sent any + if (tableDto.getDataMetadata() != null || tableDto.getDefinitionMetadata() != null) { + log.info("Saving user metadata for table {}", name); + userMetadataService.saveMetadata(session.getUser(), tableDto, true); + } + } + + @Override + public TableDto copy( @Nonnull QualifiedName sourceName, @Nonnull QualifiedName targetName) { + // Source should be same + if( !sourceName.getCatalogName().equals(targetName.getCatalogName())){ + throw new MetacatNotSupportedException("Cannot copy a table from a different source"); + } + // Error out when source table does not exists + Optional oTable = get(sourceName, false); + if( !oTable.isPresent()){ + throw new TableNotFoundException(new SchemaTableName(sourceName.getDatabaseName(), sourceName.getTableName())); + } + // Error out when target table already exists + Optional oTargetTable = get( targetName, false); + if( oTargetTable.isPresent()){ + throw new TableNotFoundException(new SchemaTableName(targetName.getDatabaseName(), targetName.getTableName())); + } + return copy(oTable.get(), targetName); + } + + @Override + public TableDto copy(@Nonnull TableDto tableDto, @Nonnull QualifiedName targetName) { + if( !databaseService.exists( targetName)){ + databaseService.create( targetName, null); + } + TableDto targetTableDto = new TableDto(); + targetTableDto.setName( targetName); + targetTableDto.setFields(tableDto.getFields()); + targetTableDto.setPartition_keys( tableDto.getPartition_keys()); + StorageDto storageDto = tableDto.getSerde(); + if( storageDto != null) { + StorageDto targetStorageDto = new StorageDto(); + targetStorageDto.setInputFormat(storageDto.getInputFormat()); + targetStorageDto.setOwner(storageDto.getOwner()); + targetStorageDto.setOutputFormat(storageDto.getOutputFormat()); + targetStorageDto.setParameters(storageDto.getParameters()); + targetStorageDto.setUri(storageDto.getUri()); + 
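+            // Editor's note: the copy is shallow with respect to data. The target table
+            // reuses the source storage location (setUri above) and its serialization
+            // settings; only the table definition is duplicated, and user metadata is
+            // intentionally not carried over.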
targetStorageDto.setSerializationLib(storageDto.getSerializationLib()); + targetTableDto.setSerde(targetStorageDto); + } + create( targetName, targetTableDto); + return targetTableDto; + } + + @Override + public void saveMetadata( + @Nonnull + QualifiedName name, ObjectNode definitionMetadata, ObjectNode dataMetadata) { + Session session = validateAndGetSession(name); + Optional tableDtoOptional = get( name, false); + if( tableDtoOptional.isPresent()){ + TableDto tableDto = tableDtoOptional.get(); + tableDto.setDefinitionMetadata( definitionMetadata); + tableDto.setDataMetadata( dataMetadata); + log.info("Saving user metadata for table {}", name); + userMetadataService.saveMetadata( session.getUser(), tableDto, true); + tag( name, tableDto.getDefinitionMetadata()); + } + } + + @Override + public List getQualifiedNames(String uri, boolean prefixSearch){ + List result = Lists.newArrayList(); + Map catalogNames = metadataManager.getCatalogNames(); + + catalogNames.values().stream().forEach(catalogName -> { + Session session = sessionProvider.getSession(QualifiedName.ofCatalog(catalogName)); + List schemaTableNames = metadataManager.getTableNames( session, uri, prefixSearch); + List qualifiedNames = schemaTableNames.stream().map( + schemaTableName -> QualifiedName.ofTable( catalogName, schemaTableName.getSchemaName(), schemaTableName.getTableName())).collect(Collectors.toList()); + result.addAll(qualifiedNames); + }); + return result; + } + + private Session validateAndGetSession(QualifiedName name) { + checkNotNull(name, "name cannot be null"); + checkArgument(name.isTableDefinition(), "Definition {} does not refer to a table", name); + + return sessionProvider.getSession(name); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchClientProvider.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchClientProvider.java new file mode 100644 index 000000000..ee92181a4 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchClientProvider.java @@ -0,0 +1,38 @@ +package com.netflix.metacat.main.services.search; + +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import com.netflix.metacat.common.server.Config; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.InetSocketTransportAddress; + +import javax.inject.Inject; +import javax.inject.Provider; + +public class ElasticSearchClientProvider implements Provider { + private Client client; + + @Inject + public ElasticSearchClientProvider(Config config) { + String clusterName = config.getElasticSearchClusterName(); + if( clusterName != null ) { + Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", clusterName) + .put("transport.tcp.connect_timeout", "60s").build(); + client = new TransportClient(settings); + // Add the transport address if exists + String clusterNodesStr = config.getElasticSearchClusterNodes(); + if(!Strings.isNullOrEmpty( clusterNodesStr)){ + Iterable clusterNodes = Splitter.on(',').split(clusterNodesStr); + clusterNodes.forEach( clusterNode -> ((TransportClient)client).addTransportAddress( new InetSocketTransportAddress( clusterNode, config.getElasticSearchClusterPort()))); + } + } + } + + @Override + public Client get() { + return client; + } +} 
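
Editor's note: a minimal wiring sketch for the provider above (illustrative; the module
class name is hypothetical and the binding is assumed, not shown in this change).
Because the provider returns null when no cluster name is configured, consumers such
as ElasticSearchUtil accept a @Nullable Client.

    import com.google.inject.AbstractModule;
    import org.elasticsearch.client.Client;

    public class HypotheticalSearchModule extends AbstractModule {
        @Override
        protected void configure() {
            // One shared TransportClient per JVM, constructed once from Config.
            bind(Client.class).toProvider(ElasticSearchClientProvider.class).asEagerSingleton();
        }
    }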
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDoc.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDoc.java
new file mode 100644
index 000000000..ca09dc811
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchDoc.java
@@ -0,0 +1,103 @@
+package com.netflix.metacat.main.services.search;
+
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.netflix.metacat.common.dto.CatalogDto;
+import com.netflix.metacat.common.dto.DatabaseDto;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.TableDto;
+import com.netflix.metacat.common.json.MetacatJsonLocator;
+import org.elasticsearch.action.get.GetResponse;
+
+import java.util.Map;
+
+/**
+ * Wrapper around a DTO indexed in elastic search, carrying the bookkeeping fields
+ * (user, deleted flag, refresh marker) that are stored alongside it.
+ * Created by amajumdar on 8/17/15.
+ */
+public class ElasticSearchDoc {
+    protected interface Field {
+        String USER = "user_";
+        String DELETED = "deleted_";
+        String REFRESH_MARKER = "refreshMarker_";
+    }
+
+    public enum Type {
+        catalog(CatalogDto.class), database(DatabaseDto.class), table(TableDto.class),
+        mview(TableDto.class), partition(PartitionDto.class);
+
+        private final Class<?> clazz;
+
+        Type(Class<?> clazz) {
+            this.clazz = clazz;
+        }
+
+        public Class<?> getClazz() {
+            return clazz;
+        }
+    }
+
+    String id;
+    Object dto;
+    String user;
+    boolean deleted;
+    String refreshMarker;
+
+    public ElasticSearchDoc(String id, Object dto, String user, boolean deleted) {
+        this.id = id;
+        this.dto = dto;
+        this.user = user;
+        this.deleted = deleted;
+    }
+
+    public ElasticSearchDoc(String id, Object dto, String user, boolean deleted, String refreshMarker) {
+        this.id = id;
+        this.dto = dto;
+        this.user = user;
+        this.deleted = deleted;
+        this.refreshMarker = refreshMarker;
+    }
+
+    private static Class<?> getClass(String type) {
+        return Type.valueOf(type).getClazz();
+    }
+
+    public ObjectNode toJsonObject() {
+        ObjectNode oMetadata = MetacatJsonLocator.INSTANCE.toJsonObject(dto);
+        // True if this entity has been deleted
+        oMetadata.put(Field.DELETED, deleted);
+        // The user who created/updated/deleted this entity
+        oMetadata.put(Field.USER, user);
+        if (refreshMarker != null) {
+            oMetadata.put(Field.REFRESH_MARKER, refreshMarker);
+        }
+        return oMetadata;
+    }
+
+    public String toJsonString() {
+        String result = MetacatJsonLocator.INSTANCE.toJsonString(toJsonObject());
+        return result.replace("{}", "null");
+    }
+
+    public static ElasticSearchDoc parse(GetResponse response) {
+        ElasticSearchDoc result = null;
+        if (response.isExists()) {
+            Map<String, Object> responseMap = response.getSourceAsMap();
+            String user = (String) responseMap.get(Field.USER);
+            boolean deleted = (boolean) responseMap.get(Field.DELETED);
+            Object dto = MetacatJsonLocator.INSTANCE.parseJsonValue(response.getSourceAsBytes(),
+                    getClass(response.getType()));
+            result = new ElasticSearchDoc(response.getId(), dto, user, deleted);
+        }
+        return result;
+    }
+
+    public Object getDto() {
+        return dto;
+    }
+
+    public String getUser() {
+        return user;
+    }
+
+    public boolean isDeleted() {
+        return deleted;
+    }
+
+    public String getId() {
+        return id;
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchMetacatRefresh.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchMetacatRefresh.java
new file mode 100644
index 000000000..05ff0caeb
--- /dev/null
+++ 
b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchMetacatRefresh.java @@ -0,0 +1,490 @@ +package com.netflix.metacat.main.services.search; + +import com.facebook.presto.spi.NotFoundException; +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.Sort; +import com.facebook.presto.spi.SortOrder; +import com.google.common.base.Functions; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import com.google.common.base.Throwables; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.AsyncFunction; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.CatalogDto; +import com.netflix.metacat.common.dto.CatalogMappingDto; +import com.netflix.metacat.common.dto.DatabaseDto; +import com.netflix.metacat.common.dto.PartitionDto; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.common.exception.MetacatNotFoundException; +import com.netflix.metacat.common.monitoring.CounterWrapper; +import com.netflix.metacat.common.monitoring.TimerWrapper; +import com.netflix.metacat.common.server.Config; +import com.netflix.metacat.common.usermetadata.UserMetadataService; +import com.netflix.metacat.common.util.MetacatContextManager; +import com.netflix.metacat.main.services.CatalogService; +import com.netflix.metacat.main.services.DatabaseService; +import com.netflix.metacat.main.services.PartitionService; +import com.netflix.metacat.main.services.TableService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.inject.Inject; +import java.time.Instant; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.catalog; +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.database; +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.partition; +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.table; + +/** + * This class does a refresh of all the metadata entities from original data sources to elastic search + * Created by amajumdar on 8/20/15. 
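+ *
+ * The refresh fans out in stages (catalogs, then databases, then tables, with a
+ * separate entry point for partitions) on bounded thread pools, stamping every
+ * saved document with a refresh marker; documents still carrying an older marker
+ * at the end of a run are rechecked against the source and soft deleted if gone.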
+ */ +public class ElasticSearchMetacatRefresh { + private static final Logger log = LoggerFactory.getLogger(ElasticSearchMetacatRefresh.class); + private static AtomicBoolean isElasticSearchMetacatRefreshAlreadyRunning = new AtomicBoolean(false); + public static final Predicate notNull = o -> o != null; + private String refreshMarker; + @Inject + CatalogService catalogService; + @Inject + Config config; + @Inject + DatabaseService databaseService; + @Inject + TableService tableService; + @Inject + PartitionService partitionService; + @Inject + ElasticSearchUtil elasticSearchUtil; + @Inject + UserMetadataService userMetadataService; + // Fixed thread pool + ListeningExecutorService service; + ListeningExecutorService esService; + + public void process(){ + List catalogNames = getCatalogNamesToRefresh(); + List qNames = catalogNames.stream() + .map(QualifiedName::ofCatalog).collect(Collectors.toList()); + _process(qNames, () -> _processCatalogs(catalogNames), "process", true, 1000); + } + + public void processCatalogs(List catalogNames){ + List qNames = catalogNames.stream() + .map(QualifiedName::ofCatalog).collect(Collectors.toList()); + _process( qNames, () -> _processCatalogs(catalogNames), "processCatalogs", true, 1000); + } + + public void processDatabases(String catalogName, List databaseNames){ + List qNames = databaseNames.stream() + .map(s -> QualifiedName.ofDatabase(catalogName, s)).collect(Collectors.toList()); + _process(qNames, () -> _processDatabases(QualifiedName.ofCatalog(catalogName), qNames), "processDatabases", true, 1000); + } + + public void processPartitions(){ + List catalogNames = Splitter.on(',').omitEmptyStrings().trimResults() + .splitToList(config.getElasticSearchRefreshPartitionsIncludeCatalogs()); + List qNames = catalogNames.stream() + .map(QualifiedName::ofCatalog).collect(Collectors.toList()); + _process( qNames, () -> _processPartitions(qNames), "processPartitions", false, 500); + } + + private ListenableFuture _processPartitions(List qNames) { + List tables = elasticSearchUtil.getTableIdsByCatalogs(table.name(), qNames); + List>> futures = tables.stream().map(s -> service.submit(() -> { + QualifiedName tableName = QualifiedName.fromString(s, false); + List> indexFutures = Lists.newArrayList(); + int offset = 0; + int count; + Sort sort = null; + if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) { + sort = new Sort("id", SortOrder.ASC); + } else { + sort = new Sort("part_id", SortOrder.ASC); + } + Pageable pageable = new Pageable(10000, offset); + do { + List partitionDtos = partitionService.list(tableName, null, null, sort, pageable, true, + true, true); + count = partitionDtos.size(); + if (!partitionDtos.isEmpty()) { + List> partitionedPartitionDtos = Lists.partition(partitionDtos, 1000); + partitionedPartitionDtos.forEach( + subPartitionsDtos -> indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos))); + offset = offset + count; + pageable.setOffset( offset); + } + } while (count == 10000); + return Futures.transform(Futures.successfulAsList(indexFutures), Functions.constant((Void) null)); + })).collect(Collectors.toList()); + return Futures.transform(Futures.successfulAsList(futures), + (AsyncFunction>, Void>) input -> { + List> inputFuturesWithoutNulls = input.stream().filter(notNull).collect(Collectors.toList()); + return Futures.transform(Futures.successfulAsList(inputFuturesWithoutNulls), Functions.constant(null)); + }); + } + + private static ExecutorService newFixedThreadPool(int nThreads, 
String threadFactoryName, int queueSize) { + return new ThreadPoolExecutor(nThreads, nThreads, + 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(queueSize), + new ThreadFactoryBuilder() + .setNameFormat(threadFactoryName) + .build(), + (r, executor) -> { + // this will block if the queue is full + try { + executor.getQueue().put(r); + } catch (InterruptedException e) { + throw Throwables.propagate(e); + } + }); + } + + private void _process(List qNames, Supplier> supplier, String requestName, boolean delete, int queueSize){ + if( isElasticSearchMetacatRefreshAlreadyRunning.compareAndSet( false, true)) { + TimerWrapper timer = TimerWrapper.createStarted("dse.metacat.timer.ElasticSearchMetacatRefresh." + requestName); + try { + log.info("Start: Full refresh of metacat index in elastic search. Processing {} ...", qNames); + MetacatContext context = new MetacatContext( "admin", "elasticSearchRefresher", null, null, null); + MetacatContextManager.setContext(context); + refreshMarker = Instant.now().toString(); + service = MoreExecutors.listeningDecorator(newFixedThreadPool(50, "elasticsearch-refresher-%d", queueSize)); + esService = MoreExecutors.listeningDecorator(newFixedThreadPool(5, "elasticsearch-refresher-es-%d", queueSize)); + supplier.get().get(24, TimeUnit.HOURS); + log.info("End: Full refresh of metacat index in elastic search"); + if( delete) { + deleteUnmarkedEntities(qNames); + } + } catch (Exception e) { + log.error("Full refresh of metacat index failed", e); + CounterWrapper.incrementCounter("dse.metacat.elasticSearchMetacatRefreshFailureCount"); + } finally { + try { + shutdown(service); + shutdown(esService); + } finally { + isElasticSearchMetacatRefreshAlreadyRunning.set(false); + log.info("### Time taken to complete {} is {} ms", requestName, timer.stop()); + } + } + + } else { + log.info("Full refresh of metacat index is already running."); + CounterWrapper.incrementCounter("dse.metacat.elasticSearchMetacatRefreshAlreadyRunning"); + } + } + + private void shutdown(ListeningExecutorService executorService) { + if( executorService != null){ + executorService.shutdown(); + try { + // Wait a while for existing tasks to terminate + if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) { + executorService.shutdownNow(); // Cancel currently executing tasks + // Wait a while for tasks to respond to being cancelled + if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) + log.warn("Thread pool for metacat refresh did not terminate"); + } + } catch (InterruptedException ie) { + // (Re-)Cancel if current thread also interrupted + executorService.shutdownNow(); + // Preserve interrupt status + Thread.currentThread().interrupt(); + } + } + } + + private void deleteUnmarkedEntities(List qNames) { + log.info("Start: Delete unmarked entities"); + // + // get unmarked qualified names + // check if it not exists + // delete + // + elasticSearchUtil.refresh(); + MetacatContext context = new MetacatContext("admin", "metacat-refresh", null, null, null); + List unmarkedDatabaseDtos = elasticSearchUtil.getQualifiedNamesByMarkerByNames("database", qNames, refreshMarker, DatabaseDto.class); + if( !unmarkedDatabaseDtos.isEmpty()) { + if(unmarkedDatabaseDtos.size() <= config.getElasticSearchThresholdUnmarkedDatabasesDelete()) { + List deleteDatabaseDtos = unmarkedDatabaseDtos.stream().filter(databaseDto -> { + boolean result = false; + try { + DatabaseDto dto = databaseService.get(databaseDto.getName(), false); + if (dto == null) { + result = true; + } + } catch (NotFoundException 
| MetacatNotFoundException ignored) { + result = true; + } catch (Exception ignored) {} + return result; + }).collect(Collectors.toList()); + if (!deleteDatabaseDtos.isEmpty()) { + List deleteDatabaseQualifiedNames = deleteDatabaseDtos.stream() + .map(DatabaseDto::getName) + .collect(Collectors.toList()); + List deleteDatabaseNames = deleteDatabaseQualifiedNames.stream().map( + QualifiedName::toString).collect(Collectors.toList()); + userMetadataService.deleteDefinitionMetadatas(deleteDatabaseQualifiedNames); + elasticSearchUtil.softDelete("database", deleteDatabaseNames, context); + } + }else { + log.info("Count of unmarked databases({}) is more than the threshold {}", unmarkedDatabaseDtos.size(), config.getElasticSearchThresholdUnmarkedDatabasesDelete()); + CounterWrapper.incrementCounter("dse.metacat.counter.unmarked.databases.threshold.crossed"); + } + } + + List unmarkedTableDtos = elasticSearchUtil.getQualifiedNamesByMarkerByNames("table", qNames, refreshMarker, TableDto.class); + if( !unmarkedTableDtos.isEmpty() ) { + if(unmarkedTableDtos.size() <= config.getElasticSearchThresholdUnmarkedTablesDelete()) { + List deleteTableDtos = unmarkedTableDtos.stream().filter(tableDto -> { + boolean result = false; + try { + Optional dto = tableService.get(tableDto.getName(), false); + if (!dto.isPresent()) { + result = true; + } + } catch (NotFoundException | MetacatNotFoundException ignored) { + result = true; + } catch (Exception ignored) {} + return result; + }).collect(Collectors.toList()); + if (!deleteTableDtos.isEmpty()) { + List deleteTableNames = deleteTableDtos.stream().map( + dto -> dto.getName().toString()).collect(Collectors.toList()); + userMetadataService.deleteMetadatas(Lists.newArrayList(deleteTableDtos), false); + elasticSearchUtil.softDelete("table", deleteTableNames, context); + } + } else { + log.info("Count of unmarked tables({}) is more than the threshold {}", unmarkedTableDtos.size(), config.getElasticSearchThresholdUnmarkedTablesDelete()); + CounterWrapper.incrementCounter("dse.metacat.counter.unmarked.tables.threshold.crossed"); + + } + } + log.info("End: Delete unmarked entities"); + } + + private ListenableFuture _processCatalogs(List catalogNames){ + log.info("Start: Full refresh of catalogs: {}", catalogNames); + List> getCatalogFutures = catalogNames.stream() + .map(catalogName -> service.submit(() -> { + CatalogDto result = null; + try { + result = getCatalog(catalogName); + } catch (Exception e) { + log.error("Failed to retrieve catalog: {}", catalogName); + elasticSearchUtil.log("ElasticSearchMetacatRefresh.getCatalog", catalog.name(), catalogName, null, e.getMessage(), e, true); + } + return result; + })) + .collect(Collectors.toList()); + return Futures.transform( Futures.successfulAsList(getCatalogFutures), + (AsyncFunction, Void>) input -> { + List> processCatalogFutures = input.stream().filter(notNull).map( + catalogDto -> { + List databaseNames = getDatabaseNamesToRefresh(catalogDto); + return _processDatabases(catalogDto.getName(), databaseNames); + }).filter(notNull).collect(Collectors.toList()); + return Futures.transform(Futures.successfulAsList(processCatalogFutures), Functions.constant(null)); + }); + } + + private List getDatabaseNamesToRefresh(CatalogDto catalogDto) { + List databasesToRefresh = catalogDto.getDatabases(); + if(!Strings.isNullOrEmpty(config.getElasticSearchRefreshIncludeDatabases())){ + List refreshDatabaseNames = Splitter.on(',').omitEmptyStrings().trimResults().splitToList( + config.getElasticSearchRefreshIncludeDatabases()); + 
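+            // Editor's note: with an include list of, say, "db1,db2" (values are
+            // hypothetical), only those databases survive this filter; the exclude
+            // list below is then applied on top of whatever remains.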
databasesToRefresh = databasesToRefresh.stream() + .filter(refreshDatabaseNames::contains) + .collect(Collectors.toList()); + } + if(!Strings.isNullOrEmpty(config.getElasticSearchRefreshExcludeDatabases())){ + List excludeDatabaseNames = Splitter.on(',').omitEmptyStrings().trimResults().splitToList( + config.getElasticSearchRefreshExcludeDatabases()); + databasesToRefresh = databasesToRefresh.stream() + .filter(databaseName -> !excludeDatabaseNames.contains(databaseName)) + .collect(Collectors.toList()); + } + return databasesToRefresh.stream() + .map(s -> QualifiedName.ofDatabase(catalogDto.getName().getCatalogName(), s)) + .collect(Collectors.toList()); + } + + private List getCatalogNamesToRefresh() { + List result = null; + if(!Strings.isNullOrEmpty(config.getElasticSearchRefreshIncludeCatalogs())){ + result = Splitter.on(',').omitEmptyStrings().trimResults().splitToList(config.getElasticSearchRefreshIncludeCatalogs()); + } else { + result = getCatalogNames(); + } + return result; + } + + /** + * Process the list of databases + * @param catalogName catalog name + * @param databaseNames database names + * @return future + */ + private ListenableFuture _processDatabases(QualifiedName catalogName, List databaseNames){ + ListenableFuture resultFuture = null; + log.info("Full refresh of catalog {} for databases({}): {}", catalogName, databaseNames.size(), databaseNames); + List> getDatabaseFutures = databaseNames.stream() + .map(databaseName -> service.submit(() -> { + DatabaseDto result = null; + try { + result = getDatabase( databaseName); + } catch (Exception e) { + log.error("Failed to retrieve database: {}", databaseName); + elasticSearchUtil.log("ElasticSearchMetacatRefresh.getDatabase", database.name(), databaseName.toString(), null, e.getMessage(), e, true); + } + return result; + })) + .collect(Collectors.toList()); + + if( getDatabaseFutures != null && !getDatabaseFutures.isEmpty()) { + resultFuture = Futures.transform(Futures.successfulAsList(getDatabaseFutures), + (AsyncFunction, Void>) input -> { + ListenableFuture processDatabaseFuture = indexDatabaseDtos(catalogName, input); + List> processDatabaseFutures = input.stream().filter(notNull) + .map(databaseDto -> { + List tableNames = databaseDto.getTables().stream() + .map(s -> QualifiedName.ofTable(databaseDto.getName().getCatalogName(), + databaseDto.getName().getDatabaseName(), s)) + .collect(Collectors.toList()); + log.info("Full refresh of database {} for tables({}): {}", databaseDto.getName().toString(), databaseDto.getTables().size(), databaseDto.getTables()); + return processTables(databaseDto.getName(), tableNames); + }).filter(notNull).collect(Collectors.toList()); + processDatabaseFutures.add(processDatabaseFuture); + return Futures.transform(Futures.successfulAsList(processDatabaseFutures), Functions.constant(null)); + }); + } + + return resultFuture; + } + + /** + * Save all databases to index it in elastic search + * @param catalogName catalog name + * @param dtos database dtos + * @return future + */ + private ListenableFuture indexDatabaseDtos(QualifiedName catalogName, List dtos){ + return esService.submit(() -> { + List docs = dtos.stream() + .map( dto -> new ElasticSearchDoc( dto.getName().toString(), dto, "admin", false, refreshMarker)) + .collect(Collectors.toList()); + log.info("Saving databases for catalog: {}", catalogName); + elasticSearchUtil.save(database.name(), docs); + return null; + }); + } + + /** + * Process the list of tables in batches + * @param databaseName database name + * @param tableNames 
table names + * @return A future containing the tasks + */ + private ListenableFuture processTables(QualifiedName databaseName, List tableNames){ + List> tableNamesBatches = Lists.partition( tableNames, 500); + List> processTablesBatchFutures = tableNamesBatches.stream().map( + subTableNames -> _processTables( databaseName, subTableNames)).collect(Collectors.toList()); + + return Futures.transform(Futures.successfulAsList(processTablesBatchFutures),Functions.constant(null)); + } + + private ListenableFuture _processTables(QualifiedName databaseName, List tableNames){ + List>> getTableFutures = tableNames.stream() + .map(tableName -> service.submit(() -> { + Optional result = null; + try { + result = getTable( tableName); + } catch (Exception e) { + log.error("Failed to retrieve table: {}", tableName); + elasticSearchUtil.log("ElasticSearchMetacatRefresh.getTable", table.name(), tableName.toString(), null, e.getMessage(), e, true); + } + return result; + })) + .collect(Collectors.toList()); + + return Futures.transform(Futures.successfulAsList(getTableFutures), + (AsyncFunction>, Void>) input -> indexTableDtos( databaseName, input)); + } + + /** + * Save all tables to index it in elastic search + * @param databaseName database name + * @param dtos table dtos + * @return future + */ + private ListenableFuture indexTableDtos(QualifiedName databaseName, List> dtos){ + return esService.submit(() -> { + List docs = dtos.stream().filter(dto -> dto!= null && dto.isPresent()).map( + tableDtoOptional -> { + TableDto dto = tableDtoOptional.get(); + String userName = dto.getAudit() != null ? dto.getAudit().getCreatedBy() + : "admin"; + return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, refreshMarker); + }).collect(Collectors.toList()); + log.info("Saving tables for database: {}", databaseName); + elasticSearchUtil.save(table.name(), docs); + return null; + }); + } + + /** + * Save all tables to index it in elastic search + * @param tableName database name + * @param dtos partition dtos + * @return future + */ + private ListenableFuture indexPartitionDtos(QualifiedName tableName, List dtos){ + return esService.submit(() -> { + List docs = dtos.stream().filter(dto -> dto!=null).map( + dto -> { + String userName = dto.getAudit() != null ? 
dto.getAudit().getCreatedBy() + : "admin"; + return new ElasticSearchDoc(dto.getName().toString(), dto, userName, false, refreshMarker); + }).collect(Collectors.toList()); + log.info("Saving partitions for tableName: {}", tableName); + elasticSearchUtil.save(partition.name(), docs); + return null; + }); + } + + protected List getCatalogNames() { + return catalogService.getCatalogNames().stream().map(CatalogMappingDto::getCatalogName).collect( + Collectors.toList()); + } + + protected CatalogDto getCatalog(String catalogName) { + return catalogService.get(QualifiedName.ofCatalog(catalogName)); + } + + protected DatabaseDto getDatabase(QualifiedName databaseName) { + return databaseService.get(databaseName, true); + } + + protected Optional getTable(QualifiedName tableName) { + return tableService.get(tableName, true); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtil.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtil.java new file mode 100644 index 000000000..58918cef8 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/ElasticSearchUtil.java @@ -0,0 +1,430 @@ +package com.netflix.metacat.main.services.search; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.github.rholder.retry.Retryer; +import com.github.rholder.retry.RetryerBuilder; +import com.github.rholder.retry.StopStrategies; +import com.github.rholder.retry.WaitStrategies; +import com.google.common.base.Throwables; +import com.google.common.collect.FluentIterable; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.netflix.metacat.common.MetacatContext; +import com.netflix.metacat.common.QualifiedName; +import com.netflix.metacat.common.dto.TableDto; +import com.netflix.metacat.common.json.MetacatJson; +import com.netflix.metacat.common.monitoring.CounterWrapper; +import com.netflix.metacat.common.server.Config; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nullable; +import javax.inject.Inject; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Field.DELETED; +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Field.USER; +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.table; + +/** + * Created by amajumdar on 8/12/15. 
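+ *
+ * Thin wrapper around the transport {@link Client} for the "metacat" index: retrying
+ * save/delete/soft-delete helpers (single and bulk) plus the query helpers used by
+ * the refresh job.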
+ */ +public class ElasticSearchUtil { + private XContentType contentType = Requests.INDEX_CONTENT_TYPE; + private static final String ES_INDEX = "metacat"; + private static final Retryer RETRY_ES_PUBLISH = RetryerBuilder.newBuilder() + .retryIfExceptionOfType(ElasticsearchException.class) + .withWaitStrategy(WaitStrategies.incrementingWait(10, TimeUnit.SECONDS, 30, TimeUnit.SECONDS)) + .withStopStrategy(StopStrategies.stopAfterAttempt(3)) + .build(); + private final Client client; + private final Config config; + private final MetacatJson metacatJson; + private static final Logger log = LoggerFactory.getLogger(ElasticSearchUtil.class); + + @Inject + public ElasticSearchUtil(@Nullable Client client, Config config, MetacatJson metacatJson) { + this.config = config; + this.client = client; + this.metacatJson = metacatJson; + } + + /** + * Delete index document + * @param type index type + * @param id entity id + */ + public void delete(String type, String id) { + try { + RETRY_ES_PUBLISH.call(() -> { + client.prepareDelete(ES_INDEX, type, id).execute().actionGet(); + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed deleting metadata of type %s with id %s.", type, id), e); + CounterWrapper.incrementCounter("dse.metacat.esDeleteFailure"); + log("ElasticSearchUtil.delete", type, id, null, e.getMessage(), e, true); + } + } + + /** + * Delete index documents + * @param type index type + * @param ids entity ids + */ + public void delete(String type, List ids) { + if(ids == null || ids.isEmpty()){ + return; + } + try { + RETRY_ES_PUBLISH.call(() -> { + BulkRequestBuilder bulkRequest = client.prepareBulk(); + ids.forEach(id -> bulkRequest.add( client.prepareDelete(ES_INDEX, type, id))); + BulkResponse bulkResponse = bulkRequest.execute().actionGet(); + if(bulkResponse.hasFailures()){ + for(BulkItemResponse item: bulkResponse.getItems()){ + if( item.isFailed()){ + log.error("Failed deleting metadata of type {} with id {}. 
Message: {}", type, item.getId(), item.getFailureMessage()); + CounterWrapper.incrementCounter("dse.metacat.esDeleteFailure"); + log("ElasticSearchUtil.bulkDelete", type, item.getId(), null, item.getFailureMessage(), null, true); + } + } + } + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed deleting metadata of type %s with ids %s", type, ids), e); + CounterWrapper.incrementCounter("dse.metacat.esBulkDeleteFailure"); + log("ElasticSearchUtil.bulkDelete", type, ids.toString(), null, e.getMessage(), e, true); + } + } + + /** + * Marks the document as deleted + * @param type index type + * @param id entity id + */ + public void softDelete(String type, String id, MetacatContext metacatContext) { + try { + RETRY_ES_PUBLISH.call(() -> { + XContentBuilder builder = XContentFactory.contentBuilder(contentType); + builder.startObject().field(DELETED, true).field(USER, + metacatContext.getUserName()).endObject(); + client.prepareUpdate(ES_INDEX, type, id).setDoc(builder).get(); + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed deleting metadata of type %s with id %s", type, id), e); + CounterWrapper.incrementCounter("dse.metacat.esDeleteFailure"); + log("ElasticSearchUtil.softDelete", type, id, null, e.getMessage(), e, true); + } + } + + /** + * Marks the documents as deleted + * @param type index type + * @param ids list of entity ids + */ + public void softDelete(String type, List ids, MetacatContext metacatContext) { + if( ids != null && !ids.isEmpty()) { + List> partitionedDocs = Lists.partition(ids, 100); + partitionedDocs.forEach(subIds -> _softDelete( type, subIds, metacatContext)); + } + } + + private void _softDelete(String type, List ids, MetacatContext metacatContext) { + try { + RETRY_ES_PUBLISH.call(() -> { + BulkRequestBuilder bulkRequest = client.prepareBulk(); + XContentBuilder builder = XContentFactory.contentBuilder(contentType); + builder.startObject().field(DELETED, true).field(USER, + metacatContext.getUserName()).endObject(); + ids.forEach(id -> bulkRequest.add( client.prepareUpdate(ES_INDEX, type, id).setDoc(builder))); + BulkResponse bulkResponse = bulkRequest.execute().actionGet(); + if (bulkResponse.hasFailures()) { + for (BulkItemResponse item : bulkResponse.getItems()) { + if (item.isFailed()) { + log.error("Failed soft deleting metadata of type {} with id {}. 
Message: {}", type, item.getId(), + item.getFailureMessage()); + CounterWrapper.incrementCounter("dse.metacat.esDeleteFailure"); + log("ElasticSearchUtil.bulkSoftDelete", type, item.getId(), null, item.getFailureMessage(), null, true); + } + } + } + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed soft deleting metadata of type %s with ids %s", type, ids), e); + CounterWrapper.incrementCounter("dse.metacat.esBulkDeleteFailure"); + log("ElasticSearchUtil.bulkSoftDelete", type, ids.toString(), null, e.getMessage(), e, true); + } + } + + /** + * Updates the documents with partial updates with the given fields + * @param type index type + * @param ids list of entity ids + */ + public void updates(String type, List ids, MetacatContext metacatContext, ObjectNode node) { + if(ids == null || ids.isEmpty()){ + return; + } + try { + RETRY_ES_PUBLISH.call(() -> { + BulkRequestBuilder bulkRequest = client.prepareBulk(); + ids.forEach(id -> { + node.put(USER, metacatContext.getUserName()); + bulkRequest.add( client.prepareUpdate(ES_INDEX, type, id).setDoc(metacatJson.toJsonAsBytes(node))); + }); + BulkResponse bulkResponse = bulkRequest.execute().actionGet(); + if (bulkResponse.hasFailures()) { + for (BulkItemResponse item : bulkResponse.getItems()) { + if (item.isFailed()) { + log.error("Failed updating metadata of type {} with id {}. Message: {}", type, item.getId(), + item.getFailureMessage()); + CounterWrapper.incrementCounter("dse.metacat.esUpdateFailure"); + } + } + } + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed updating metadata of type %s with ids %s", type, ids), e); + CounterWrapper.incrementCounter("dse.metacat.esBulkUpdateFailure"); + log("ElasticSearchUtil.updates", type, ids.toString(), null, e.getMessage(), e, true); + } + } + + /** + * Save of a single entity + * @param type index type + * @param id id of the entity + * @param body source string of the entity + */ + public void save(String type, String id, String body) { + try { + RETRY_ES_PUBLISH.call(() -> { + client.prepareIndex(ES_INDEX, type, id).setSource(body).execute().actionGet(); + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed saving metadata of type %s with id %s", type, id), e); + CounterWrapper.incrementCounter("dse.metacat.esSaveFailure"); + log("ElasticSearchUtil.save", type, id, null, e.getMessage(), e, true); + } + } + + /** + * Bulk save of the entities + * @param type index type + * @param docs metacat documents + */ + public void save(String type, List docs) { + if( docs != null && !docs.isEmpty()) { + List> partitionedDocs = Lists.partition(docs, 100); + partitionedDocs.forEach(subDocs -> _save(type, subDocs)); + } + } + + /** + * Bulk save of the entities + * @param type index type + * @param docs metacat documents + */ + private void _save(String type, List docs) { + if( docs != null && !docs.isEmpty()) { + try { + RETRY_ES_PUBLISH.call(() -> { + BulkRequestBuilder bulkRequest = client.prepareBulk(); + docs.forEach(doc -> bulkRequest.add(client.prepareIndex(ES_INDEX, type, doc.getId()) + .setSource(doc.toJsonString()))); + if (bulkRequest.numberOfActions() > 0) { + BulkResponse bulkResponse = bulkRequest.execute().actionGet(); + if (bulkResponse.hasFailures()) { + for (BulkItemResponse item : bulkResponse.getItems()) { + if (item.isFailed()) { + log.error("Failed saving metadata of type {} with id {}. 
Message: {}", type, item.getId(), + item.getFailureMessage()); + CounterWrapper.incrementCounter("dse.metacat.esSaveFailure"); + log("ElasticSearchUtil.bulkSave", type, item.getId(), null, + item.getFailureMessage(), null, true); + } + } + } + } + return null; + }); + } catch (Exception e) { + log.error(String.format("Failed saving metadatas of type %s", type), e); + CounterWrapper.incrementCounter("dse.metacat.esBulkSaveFailure"); + List docIds = docs.stream().map(ElasticSearchDoc::getId).collect(Collectors.toList()); + log("ElasticSearchUtil.bulkSave", type, docIds.toString(), null, e.getMessage(), e, true); + } + } + } + + public String toJsonString(String id, Object dto, MetacatContext context, boolean isDeleted){ + return new ElasticSearchDoc( id, dto, context.getUserName(), isDeleted).toJsonString(); + } + + + public List getTableIdsByUri(String type, String dataUri) { + List ids = Lists.newArrayList(); + if( dataUri != null) { + // + // Run the query and get the response. + SearchRequestBuilder request = client.prepareSearch(ES_INDEX) + .setTypes(type) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(QueryBuilders.termQuery("serde.uri", dataUri)) + .setSize(Integer.MAX_VALUE) + .setNoFields(); + SearchResponse response = request.execute().actionGet(); + if (response.getHits().hits().length != 0) { + ids = getIds(response); + } + } + return ids; + } + + public List getTableIdsByCatalogs(String type, List qualifiedNames) { + List ids = Lists.newArrayList(); + // + // Run the query and get the response. + SearchRequestBuilder request = client.prepareSearch(ES_INDEX) + .setTypes(type) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(QueryBuilders.termsQuery("name.qualifiedName.tree", qualifiedNames)) + .setSize(Integer.MAX_VALUE) + .setNoFields(); + SearchResponse response = request.execute().actionGet(); + if (response.getHits().hits().length != 0) { + ids = getIds(response); + } + return ids; + } + + public List getQualifiedNamesByMarkerByNames(String type, List qualifiedNames, String marker, Class valueType) { + List result = Lists.newArrayList(); + List names = qualifiedNames.stream().map(QualifiedName::toString).collect(Collectors.toList()); + // + // Run the query and get the response. 
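+        // Editor's note: "unmarked" means a live document (deleted_ == false) whose
+        // refreshMarker_ differs from the marker stamped by the current refresh run;
+        // an entity dropped at the source is never re-stamped, so it matches the
+        // mustNot clause below and becomes a soft-delete candidate.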
+ QueryBuilder queryBuilder = QueryBuilders.boolQuery() + .must(QueryBuilders.termsQuery("name.qualifiedName.tree", names)) + .must(QueryBuilders.termQuery("deleted_", false)) + .mustNot(QueryBuilders.termQuery("refreshMarker_", marker)); + SearchRequestBuilder request = client.prepareSearch(ES_INDEX) + .setTypes(type) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(queryBuilder) + .setSize(Integer.MAX_VALUE); + SearchResponse response = request.execute().actionGet(); + if (response.getHits().hits().length != 0) { + result.addAll( parseResponse(response, valueType)); + } + return result; + } + + private static List getIds(final SearchResponse response) { + return FluentIterable.from(response.getHits()).transform(SearchHit::getId).toList(); + } + + private List parseResponse(final SearchResponse response, final Class valueType) { + return FluentIterable.from(response.getHits()).transform(hit -> { + try { + return metacatJson.parseJsonValue(hit.getSourceAsString(), valueType); + } catch (Exception e) { + throw Throwables.propagate(e); + } + }).toList(); + } + + public void refresh(){ + client.admin().indices().refresh(new RefreshRequest(ES_INDEX)).actionGet(); + } + + public ElasticSearchDoc get(String type, String id) { + ElasticSearchDoc result = null; + GetResponse response = client.prepareGet(ES_INDEX, type, id).execute().actionGet(); + if( response.isExists()){ + result = ElasticSearchDoc.parse(response); + } + return result; + } + + public void delete(MetacatContext metacatContext, String type, boolean softDelete) { + SearchResponse response = client.prepareSearch(ES_INDEX) + .setSearchType(SearchType.SCAN) + .setScroll(new TimeValue(config.getElasticSearchScrollTimeout())) + .setSize(config.getElasticSearchScrollFetchSize()) + .setQuery(QueryBuilders.termQuery("_type", type)) + .setNoFields() + .execute() + .actionGet(); + while(true){ + response = client.prepareSearchScroll(response.getScrollId()).setScroll(new TimeValue(config.getElasticSearchScrollTimeout())).execute().actionGet(); + //Break condition: No hits are returned + if (response.getHits().getHits().length == 0) { + break; + } + List ids = getIds(response); + if( softDelete){ + softDelete( type, ids, metacatContext); + } else { + delete( type, ids); + } + } + } + + public void log(String method, String type, String name, String data, String logMessage, Exception ex, boolean error){ + try { + Map source = Maps.newHashMap(); + source.put("method", method); + source.put("name", name); + source.put("type", type); + source.put("data", data); + source.put("error", error); + source.put("message", logMessage); + source.put("details", Throwables.getStackTraceAsString(ex)); + client.prepareIndex(ES_INDEX, "metacat-log").setSource(source).execute().actionGet(); + } catch(Exception e){ + log.warn("Failed saving the log message in elastic search for method {}, name {}. 
Message: {}", method, name, e.getMessage());
+        }
+    }
+
+    public List<TableDto> simpleSearch(String searchString) {
+        List<TableDto> result = Lists.newArrayList();
+        SearchResponse response = client.prepareSearch(ES_INDEX)
+            .setTypes(table.name())
+            .setSearchType(SearchType.QUERY_THEN_FETCH)
+            .setQuery(QueryBuilders.termQuery("_all", searchString))
+            .setSize(Integer.MAX_VALUE)
+            .execute()
+            .actionGet();
+        if (response.getHits().hits().length != 0) {
+            result.addAll(parseResponse(response, TableDto.class));
+        }
+        return result;
+    }
+}
diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/services/search/MetacatEventHandlers.java b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/MetacatEventHandlers.java
new file mode 100644
index 000000000..cef2a7b3c
--- /dev/null
+++ b/metacat-main/src/main/java/com/netflix/metacat/main/services/search/MetacatEventHandlers.java
@@ -0,0 +1,159 @@
+package com.netflix.metacat.main.services.search;
+
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.google.common.eventbus.Subscribe;
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.dto.DatabaseDto;
+import com.netflix.metacat.common.dto.PartitionDto;
+import com.netflix.metacat.common.dto.TableDto;
+import com.netflix.metacat.common.json.MetacatJsonLocator;
+import com.netflix.metacat.common.server.events.MetacatCreateDatabasePostEvent;
+import com.netflix.metacat.common.server.events.MetacatCreateMViewPostEvent;
+import com.netflix.metacat.common.server.events.MetacatCreateTablePostEvent;
+import com.netflix.metacat.common.server.events.MetacatDeleteDatabasePostEvent;
+import com.netflix.metacat.common.server.events.MetacatDeleteMViewPartitionPostEvent;
+import com.netflix.metacat.common.server.events.MetacatDeleteMViewPostEvent;
+import com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent;
+import com.netflix.metacat.common.server.events.MetacatDeleteTablePostEvent;
+import com.netflix.metacat.common.server.events.MetacatRenameTablePostEvent;
+import com.netflix.metacat.common.server.events.MetacatSaveMViewPartitionPostEvent;
+import com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent;
+import com.netflix.metacat.common.server.events.MetacatUpdateMViewPostEvent;
+import com.netflix.metacat.common.server.events.MetacatUpdateTablePostEvent;
+import com.netflix.metacat.main.services.TableService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.database;
+import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.mview;
+import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.partition;
+import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type.table;
+
+public class MetacatEventHandlers {
+    private static final Logger log = LoggerFactory.getLogger(MetacatEventHandlers.class);
+    private final ElasticSearchUtil es;
+    private final TableService tableService;
+
+    @Inject
+    public MetacatEventHandlers(ElasticSearchUtil es, TableService tableService) {
+        this.es = es;
+        this.tableService = tableService;
+    }
+
+    @Subscribe
+    public void metacatCreateDatabasePostEventHandler(MetacatCreateDatabasePostEvent event) {
+        DatabaseDto dto = event.getDto();
+        ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
event.getMetacatContext().getUserName(), false);
+        es.save(database.name(), doc.getId(), doc.toJsonString());
+    }
+
+    @Subscribe
+    public void metacatCreateMViewPostEventHandler(MetacatCreateMViewPostEvent event) {
+        TableDto dto = event.getDto();
+        ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
+            event.getMetacatContext().getUserName(), false);
+        es.save(mview.name(), doc.getId(), doc.toJsonString());
+    }
+
+    @Subscribe
+    public void metacatCreateTablePostEventHandler(MetacatCreateTablePostEvent event) {
+        TableDto dto = event.getDto();
+        ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
+            event.getMetacatContext().getUserName(), false);
+        es.save(table.name(), doc.getId(), doc.toJsonString());
+    }
+
+    @Subscribe
+    public void metacatDeleteDatabasePostEventHandler(MetacatDeleteDatabasePostEvent event) {
+        DatabaseDto dto = event.getDto();
+        es.softDelete(database.name(), dto.getName().toString(), event.getMetacatContext());
+    }
+
+    @Subscribe
+    public void metacatDeleteMViewPostEventHandler(MetacatDeleteMViewPostEvent event) {
+        TableDto dto = event.getDto();
+        es.softDelete(mview.name(), dto.getName().toString(), event.getMetacatContext());
+    }
+
+    @Subscribe
+    public void metacatDeleteTablePostEventHandler(MetacatDeleteTablePostEvent event) {
+        TableDto dto = event.getDto();
+        es.softDelete(table.name(), dto.getName().toString(), event.getMetacatContext());
+    }
+
+    @Subscribe
+    public void metacatDeleteMViewPartitionPostEventHandler(MetacatDeleteMViewPartitionPostEvent event) {
+        List<String> partitionIds = event.getPartitionIds();
+        es.softDelete(partition.name(), partitionIds, event.getMetacatContext());
+    }
+
+    @Subscribe
+    public void metacatDeleteTablePartitionPostEventHandler(MetacatDeleteTablePartitionPostEvent event) {
+        List<String> partitionIds = event.getPartitionIds();
+        es.softDelete(partition.name(), partitionIds, event.getMetacatContext());
+    }
+
+    @Subscribe
+    public void metacatRenameTablePostEventHandler(MetacatRenameTablePostEvent event) {
+        es.delete(table.name(), event.getName().toString());
+
+        TableDto dto = event.getDto();
+        ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
+            event.getMetacatContext().getUserName(), false);
+        es.save(table.name(), doc.getId(), doc.toJsonString());
+    }
+
+    @Subscribe
+    public void metacatUpdateMViewPostEventHandler(MetacatUpdateMViewPostEvent event) {
+        TableDto dto = event.getDto();
+        ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
+            event.getMetacatContext().getUserName(), false);
+        es.save(mview.name(), doc.getId(), doc.toJsonString());
+    }
+
+    @Subscribe
+    public void metacatUpdateTablePostEventHandler(MetacatUpdateTablePostEvent event) {
+        TableDto dto = event.getDto();
+        if (dto == null) {
+            Optional<TableDto> oDto = tableService.get(event.getName(), true);
+            if (oDto.isPresent()) {
+                dto = oDto.get();
+                event.setDto(dto);
+            }
+        }
+        if (dto != null) {
+            ElasticSearchDoc doc = new ElasticSearchDoc(dto.getName().toString(), dto,
+                event.getMetacatContext().getUserName(), false);
+            es.save(table.name(), doc.getId(), doc.toJsonString());
+            updateEntitiesWithSameUri(table.name(), dto, event.getMetacatContext());
+        }
+    }
+
+    private void updateEntitiesWithSameUri(String metadataType, TableDto dto, MetacatContext metacatContext) {
+        List<String> ids = es.getTableIdsByUri(metadataType, dto.getDataUri());
+        ObjectNode node = MetacatJsonLocator.INSTANCE.emptyObjectNode();
+        node.put("dataMetadata", dto.getDataMetadata());
+        es.updates(table.name(), ids, metacatContext, node);
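+        // Sketch of the partial document merged into each matching doc by es.updates
+        // (illustrative): { "dataMetadata": { ...metrics from the updated table... } }
+        // This keeps tables that share the same serde.uri in sync after a metadata update.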
} + + @Subscribe + public void metacatSaveMViewPartitionPostEventHandler(MetacatSaveMViewPartitionPostEvent event) { + List partitionDtos = event.getPartitions(); + MetacatContext context = event.getMetacatContext(); + List docs = partitionDtos.stream() + .map(dto -> new ElasticSearchDoc( dto.getName().toString(), dto, context.getUserName(), false)) + .collect(Collectors.toList()); + es.save(partition.name(), docs); + } + + @Subscribe + public void metacatSaveTablePartitionPostEventHandler(MetacatSaveTablePartitionPostEvent event) { + List partitionDtos = event.getPartitions(); + MetacatContext context = event.getMetacatContext(); + List docs = partitionDtos.stream() + .map( dto -> new ElasticSearchDoc( dto.getName().toString(), dto, context.getUserName(), false)) + .collect(Collectors.toList()); + es.save(partition.name(), docs); + } +} diff --git a/metacat-main/src/main/java/com/netflix/metacat/main/spi/MetacatCatalogConfig.java b/metacat-main/src/main/java/com/netflix/metacat/main/spi/MetacatCatalogConfig.java new file mode 100644 index 000000000..d513969c3 --- /dev/null +++ b/metacat-main/src/main/java/com/netflix/metacat/main/spi/MetacatCatalogConfig.java @@ -0,0 +1,87 @@ +package com.netflix.metacat.main.spi; + +import com.google.common.base.Splitter; +import com.google.common.base.Strings; + +import javax.annotation.Nonnull; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static com.google.common.base.Preconditions.checkArgument; + +public class MetacatCatalogConfig { + private static final Splitter COMMA_LIST_SPLITTER = Splitter.on(',').trimResults().omitEmptyStrings(); + private final boolean includeViewsWithTables; + private final List schemaBlacklist; + private final List schemaWhitelist; + private final int thriftPort; + private final String type; + + private MetacatCatalogConfig( + @Nonnull String type, + boolean includeViewsWithTables, + @Nonnull List schemaWhitelist, + @Nonnull List schemaBlacklist, + int thriftPort) { + this.type = type; + this.includeViewsWithTables = includeViewsWithTables; + this.schemaBlacklist = schemaBlacklist; + this.schemaWhitelist = schemaWhitelist; + this.thriftPort = thriftPort; + } + + public static MetacatCatalogConfig createFromMapAndRemoveProperties(String type, Map properties) { + checkArgument(!Strings.isNullOrEmpty(type), "type is required"); + String catalogType = properties.containsKey(Keys.CATALOG_TYPE) ? properties.remove(Keys.CATALOG_TYPE) : type; + List schemaWhitelist = properties.containsKey(Keys.SCHEMA_WHITELIST) ? + COMMA_LIST_SPLITTER.splitToList(properties.remove(Keys.SCHEMA_WHITELIST)) : + Collections.EMPTY_LIST; + + List schemaBlacklist = properties.containsKey(Keys.SCHEMA_BLACKLIST) ? 
+ COMMA_LIST_SPLITTER.splitToList(properties.remove(Keys.SCHEMA_BLACKLIST)) : + Collections.EMPTY_LIST; + + boolean includeViewsWithTables = Boolean.parseBoolean(properties.remove(Keys.INCLUDE_VIEWS_WITH_TABLES)); + + int thriftPort = 0; + if (properties.containsKey(Keys.THRIFT_PORT)) { + thriftPort = Integer.valueOf(properties.remove(Keys.THRIFT_PORT)); + } + + return new MetacatCatalogConfig(catalogType, includeViewsWithTables, schemaWhitelist, schemaBlacklist, + thriftPort); + } + + public List getSchemaBlacklist() { + return schemaBlacklist; + } + + public List getSchemaWhitelist() { + return schemaWhitelist; + } + + public int getThriftPort() { + return thriftPort; + } + + public String getType() { + return type; + } + + public boolean isIncludeViewsWithTables() { + return includeViewsWithTables; + } + + public boolean isThriftInterfaceRequested() { + return thriftPort != 0; + } + + public static class Keys { + public static final String CATALOG_TYPE = "metacat.type"; + public static final String INCLUDE_VIEWS_WITH_TABLES = "metacat.schema.list-views-with-tables"; + public static final String SCHEMA_BLACKLIST = "metacat.schema.blacklist"; + public static final String SCHEMA_WHITELIST = "metacat.schema.whitelist"; + public static final String THRIFT_PORT = "metacat.thrift.port"; + } +} diff --git a/metacat-main/src/test/groovy/com/netflix/metacat/main/manager/BaseSpec.groovy b/metacat-main/src/test/groovy/com/netflix/metacat/main/manager/BaseSpec.groovy new file mode 100644 index 000000000..78d17af09 --- /dev/null +++ b/metacat-main/src/test/groovy/com/netflix/metacat/main/manager/BaseSpec.groovy @@ -0,0 +1,162 @@ +package com.netflix.metacat.main.manager + +import com.facebook.presto.Session +import com.facebook.presto.metadata.SessionPropertyManager +import com.facebook.presto.spi.security.Identity +import com.google.inject.Inject +import com.netflix.metacat.main.init.MetacatInitializationService +import com.netflix.metacat.main.init.MetacatServletModule +import com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataModule +import io.airlift.testing.mysql.TestingMySqlServer +import spock.guice.UseModules +import spock.lang.Ignore +import spock.lang.Shared +import spock.lang.Specification + +import java.sql.Connection +import java.sql.DriverManager +import java.sql.SQLException +import java.sql.Statement +import java.util.concurrent.atomic.AtomicBoolean + +import static com.facebook.presto.spi.type.TimeZoneKey.UTC_KEY +import static java.util.Locale.ENGLISH + +@UseModules([ + MetacatServletModule.class, + MysqlUserMetadataModule.class, +]) +@Ignore +class BaseSpec extends Specification { + + public static final Session TEST_SESSION = Session.builder(new SessionPropertyManager()) + .setIdentity(new Identity("user", Optional.empty())) + .setSource("source") + .setCatalog("example") + .setSchema("tiny") + .setTimeZoneKey(UTC_KEY) + .setLocale(ENGLISH) + .setRemoteUserAddress("address") + .setUserAgent("agent") + .build(); + public static final Session TEST_MYSQL_SESSION = Session.builder(new SessionPropertyManager()) + .setIdentity(new Identity("user", Optional.empty())) + .setSource("test") + .setCatalog("mysql") + .setSchema("metacat") + .setTimeZoneKey(UTC_KEY) + .setLocale(ENGLISH) + .build(); + public static final Session TEST_HIVE_SESSION = Session.builder(new SessionPropertyManager()) + .setIdentity(new Identity("user", Optional.empty())) + .setSource("test") + .setCatalog("hive") + .setSchema("metacat") + .setTimeZoneKey(UTC_KEY) + .setLocale(ENGLISH) + .build(); + public static 
final Session TEST_FRANKLIN_SESSION = Session.builder(new SessionPropertyManager())
+            .setIdentity(new Identity("user", Optional.empty()))
+            .setSource("test")
+            .setCatalog("franklin")
+            .setSchema("metacat")
+            .setTimeZoneKey(UTC_KEY)
+            .setLocale(ENGLISH)
+            .build();
+    private static final AtomicBoolean initialized = new AtomicBoolean();
+    @Inject
+    @Shared
+    MetacatInitializationService metacatInitializationService
+    @Shared
+    TestingMySqlServer mysqlServer;
+
+    def setupSpec() {
+        if (!initialized.compareAndSet(false, true)) {
+            return;
+        }
+        setupMysql()
+        metacatInitializationService.start()
+    }
+
+    def setupMysql() {
+        mysqlServer = new TestingMySqlServer("test", "test", "example", "tpch")
+        Properties props = new Properties()
+        props.setProperty('connector.name', 'mysql')
+        props.setProperty('connection-url', mysqlServer.getJdbcUrl())
+        props.setProperty('connection-user', mysqlServer.getUser())
+        props.setProperty('connection-password', mysqlServer.getPassword())
+
+        File defaultFile = new File('src/test/resources/etc/catalog/default.properties')
+        if (!defaultFile.exists()) {
+            defaultFile = new File('metacat-main/src/test/resources/etc/catalog/default.properties')
+        }
+        props.store(new FileOutputStream(defaultFile), "test")
+
+        props.setProperty('javax.jdo.option.url', mysqlServer.getJdbcUrl())
+        props.setProperty('javax.jdo.option.username', mysqlServer.getUser())
+        props.setProperty('javax.jdo.option.password', mysqlServer.getPassword())
+        File metadataFile = new File('src/test/resources/usermetadata.properties')
+        if (!metadataFile.exists()) {
+            metadataFile = new File('metacat-main/src/test/resources/usermetadata.properties')
+        }
+        props.store(new FileOutputStream(metadataFile), "test")
+
+        File prepareFile = new File('src/test/resources/sql/prepare-test.sql')
+        if (!prepareFile.exists()) {
+            prepareFile = new File('metacat-main/src/test/resources/sql/prepare-test.sql')
+        }
+        runScript(DriverManager.getConnection(mysqlServer.getJdbcUrl()), new FileReader(prepareFile), ';')
+    }
+
+    def runScript(Connection conn, Reader reader, String delimiter) throws IOException, SQLException {
+        StringBuffer command = null;
+        LineNumberReader lineReader = new LineNumberReader(reader);
+        String line;
+        while ((line = lineReader.readLine()) != null) {
+            if (command == null) {
+                command = new StringBuffer();
+            }
+            String trimmedLine = line.trim();
+            if (trimmedLine.startsWith("--")) {
+                println(trimmedLine);
+            } else if (trimmedLine.length() < 1 || trimmedLine.startsWith("//")) {
+                // Skip blank lines and comments
+            } else if (trimmedLine.endsWith(delimiter)) {
+                command.append(line.substring(0, line.lastIndexOf(delimiter)));
+                command.append(" ");
+                Statement statement = conn.createStatement();
+
+                println(command);
+                statement.execute(command.toString());
+
+                command = null;
+                try {
+                    statement.close();
+                } catch (Exception e) {
+                    // Ignore to work around a bug in Jakarta DBCP
+                }
+                Thread.yield();
+            } else {
+                command.append(line);
+                command.append(" ");
+            }
+        }
+    }
+
+    def cleanupSpec() {
+        if (mysqlServer != null) {
+            mysqlServer.close()
+        }
+    }
+}
diff --git a/metacat-main/src/test/groovy/com/netflix/metacat/main/manager/PluginManagerSpec.groovy b/metacat-main/src/test/groovy/com/netflix/metacat/main/manager/PluginManagerSpec.groovy
new file mode 100644
index 000000000..41ea1b123
--- /dev/null
+++
b/metacat-main/src/test/groovy/com/netflix/metacat/main/manager/PluginManagerSpec.groovy @@ -0,0 +1,70 @@ +package com.netflix.metacat.main.manager + +import com.facebook.presto.metadata.QualifiedTableName +import com.facebook.presto.metadata.QualifiedTablePrefix +import com.google.inject.Inject +import com.netflix.metacat.main.presto.metadata.MetadataManager +import spock.lang.Ignore + +/** + * Created by amajumdar on 1/15/15. + */ +class PluginManagerSpec extends BaseSpec { + @Inject + PluginManager pluginManager + @Inject + MetadataManager metadataManager + + def testLoadPlugins() { + when: + pluginManager.loadPlugins() + then: + notThrown(Exception) + } + + def testMetadataManagerMySql() { + when: + metadataManager.listSchemaNames(TEST_MYSQL_SESSION, "default") + metadataManager.listTables(TEST_SESSION, new QualifiedTablePrefix("default", "example")) + def handle = metadataManager.getTableHandle(TEST_MYSQL_SESSION, QualifiedTableName.valueOf("default.example.numbers")) + metadataManager.getTableMetadata(TEST_MYSQL_SESSION, handle.get()) + metadataManager.getCatalogNames() + then: + notThrown(Exception) + } + + def testMetadataManagerExample() { + when: + metadataManager.listSchemaNames(TEST_SESSION, "example") + metadataManager.listTables(TEST_SESSION, new QualifiedTablePrefix("example", "example")) + def handle = metadataManager.getTableHandle(TEST_SESSION, QualifiedTableName.valueOf("example.example.numbers")) + metadataManager.getTableMetadata(TEST_SESSION, handle.get()) + metadataManager.getCatalogNames() + then: + notThrown(Exception) + } + + @Ignore + def testMetadataManagerHive() { + when: + metadataManager.listSchemaNames(TEST_HIVE_SESSION, "testhive") + metadataManager.listTables(TEST_HIVE_SESSION, new QualifiedTablePrefix("testhive", "charsmith")) + def handle = metadataManager.getTableHandle(TEST_HIVE_SESSION, QualifiedTableName.valueOf("testhive.charsmith.temp")) + metadataManager.getTableMetadata(TEST_HIVE_SESSION, handle.get()) + metadataManager.getCatalogNames() + then: + notThrown(Exception) + } + + @Ignore + def testMetadataManagerFranklin() { + when: + metadataManager.listSchemaNames(TEST_FRANKLIN_SESSION, "s3") + metadataManager.listTables(TEST_HIVE_SESSION, new QualifiedTablePrefix("s3", "charsmith")) + def handle = metadataManager.getTableHandle(TEST_HIVE_SESSION, QualifiedTableName.valueOf("s3.charsmith.part")) + metadataManager.getTableMetadata(TEST_HIVE_SESSION, handle.get()) + metadataManager.getCatalogNames() + then: + notThrown(Exception) + } +} diff --git a/metacat-main/src/test/groovy/com/netflix/metacat/main/search/BaseEsSpec.groovy b/metacat-main/src/test/groovy/com/netflix/metacat/main/search/BaseEsSpec.groovy new file mode 100644 index 000000000..521d207be --- /dev/null +++ b/metacat-main/src/test/groovy/com/netflix/metacat/main/search/BaseEsSpec.groovy @@ -0,0 +1,58 @@ +package com.netflix.metacat.main.search + +import com.netflix.metacat.common.MetacatContext +import com.netflix.metacat.common.json.MetacatJson +import com.netflix.metacat.common.json.MetacatJsonLocator +import com.netflix.metacat.common.server.Config +import com.netflix.metacat.main.services.search.ElasticSearchUtil +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.elasticsearch.client.Client +import org.elasticsearch.common.settings.ImmutableSettings +import 
org.elasticsearch.common.settings.Settings +import spock.lang.Shared +import spock.lang.Specification + +/** + * Created by amajumdar on 8/17/15. + */ +class BaseEsSpec extends Specification { + @Shared + Config config = Mock(Config) + @Shared + ElasticSearchUtil es + @Shared + MetacatJson metacatJson + @Shared + MetacatContext metacatContext = new MetacatContext("test", "testApp", "testClientId", "testJobId", null) + + def setupSpec() { + Settings settings = ImmutableSettings.settingsBuilder() + .put("node.http.enabled", false) + .put("index.gateway.type", "none") + .put("index.store.type", "memory") + .put("index.refresh_interval", "1s") + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build(); + Client client = org.elasticsearch.node.NodeBuilder.nodeBuilder().local(true).settings(settings).node().client() + // First delete the index if created previously + if( client.admin().indices().exists(new IndicesExistsRequest('metacat')).actionGet().exists) { + client.admin().indices().delete(new DeleteIndexRequest('metacat')).actionGet() + } + // Create a new index + def index = new CreateIndexRequest('metacat') + index.source(getFile('metacat.json').getText()) + client.admin().indices().create( index).actionGet() + metacatJson = MetacatJsonLocator.INSTANCE + es = new ElasticSearchUtil(client, config, metacatJson) + } + + def getFile(String name){ + def f = new File('../metacat-main/src/test/resources/search/mapping/' + name) + if(!f.exists()){ + f = new File('metacat-main/src/test/resources/search/mapping/' + name) + } + return f + } +} diff --git a/metacat-main/src/test/groovy/com/netflix/metacat/main/search/ElasticSearchUtilSpec.groovy b/metacat-main/src/test/groovy/com/netflix/metacat/main/search/ElasticSearchUtilSpec.groovy new file mode 100644 index 000000000..fd9d59952 --- /dev/null +++ b/metacat-main/src/test/groovy/com/netflix/metacat/main/search/ElasticSearchUtilSpec.groovy @@ -0,0 +1,123 @@ +package com.netflix.metacat.main.search + +import com.netflix.metacat.common.MetacatContext +import com.netflix.metacat.common.dto.TableDto +import com.netflix.metacat.common.json.MetacatJsonLocator +import com.netflix.metacat.common.util.DataProvider +import com.netflix.metacat.main.services.search.ElasticSearchDoc +import spock.lang.Unroll + +import static com.netflix.metacat.main.services.search.ElasticSearchDoc.Type + +/** + * Created by amajumdar on 8/17/15. 
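+ * Exercises ElasticSearchUtil end to end (save, bulk save, delete/soft-delete, partial
+ * updates and scan-based delete) against the in-memory Elasticsearch node from BaseEsSpec.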
+ */ +class ElasticSearchUtilSpec extends BaseEsSpec{ + + @Unroll + def "Test save for #id"(){ + given: + def table = DataProvider.getTable(catalogName, databaseName, tableName, "amajumdar", "s3:/a/b") + es.save(Type.table.name(), id, es.toJsonString(id, table, metacatContext, false)) + def result = (TableDto)es.get(Type.table.name(),id).getDto() + expect: + id==result.getName().toString() + where: + catalogName | databaseName | tableName | id + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' + } + + @Unroll + def "Test saves for #id"(){ + given: + def table = DataProvider.getTable(catalogName, databaseName, tableName, "amajumdar", "s3:/a/b/c") + es.save(Type.table.name(),[new ElasticSearchDoc(table.name.toString(), table, metacatContext.userName, false)]) + def result = (TableDto)es.get(Type.table.name(),id).getDto() + expect: + id==result.getName().toString() + where: + catalogName | databaseName | tableName | id + 'prodhive' | 'amajumdar' | 'part_test' | 'prodhive/amajumdar/part_test' + } + + @Unroll + def "Test delete for #id"(){ + given: + def table = DataProvider.getTable(catalogName, databaseName, tableName, "amajumdar", "s3:/a/b") + es.save(Type.table.name(), id, es.toJsonString(id, table, metacatContext, false)) + softDelete?es.softDelete(Type.table.name(), id, metacatContext):es.delete(Type.table.name(), id) + def result = es.get(Type.table.name(),id) + expect: + if( softDelete){ + id==((TableDto)result.getDto()).getName().toString() + } else { + result == null + } + where: + catalogName | databaseName | tableName | id | softDelete + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' | false + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' | true + } + + @Unroll + def "Test deletes for #id"(){ + given: + def table = DataProvider.getTable(catalogName, databaseName, tableName, "amajumdar", "s3:/a/b") + es.save(Type.table.name(), id, es.toJsonString(id, table, metacatContext, false)) + softDelete?es.softDelete(Type.table.name(), [id], metacatContext):es.delete(Type.table.name(), [id]) + def result = es.get(Type.table.name(),id) + expect: + if( softDelete){ + id==((TableDto)result.getDto()).getName().toString() + } else { + result == null + } + where: + catalogName | databaseName | tableName | id | softDelete + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' | false + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' | true + } + + @Unroll + def "Test updates for #id"(){ + given: + def table = DataProvider.getTable(catalogName, databaseName, tableName, metacatContext.getUserName(), uri) + es.save(Type.table.name(), id, es.toJsonString(id, table, metacatContext, false)) + es.updates(Type.table.name(), [id], new MetacatContext("testUpdate", null, null, null, null), MetacatJsonLocator.INSTANCE.parseJsonObject('{"dataMetadata": {"metrics":{"count":10}}}')) + def result = es.get(Type.table.name(),id) + es.refresh() + def resultByUri = es.getTableIdsByUri(Type.table.name(), uri) + expect: + result != null + result.getUser()=="testUpdate" + ((TableDto)result.getDto()).getDataMetadata()!=null + resultByUri!=null + resultByUri.size()==1 + resultByUri[0]==id + where: + catalogName | databaseName | tableName | id | uri + 'prodhive' | 'amajumdar' | 'part' | 'prodhive/amajumdar/part' | 's3:/a/b' + } + + @Unroll + def "Test deletes for #type"(){ + given: + def tables = DataProvider.getTables(catalogName, databaseName, tableName, "amajumdar", "s3:/a/b", noOfTables) + def 
docs = tables.collect{ + String userName = it.getAudit() != null ? it.getAudit().getCreatedBy() + : "admin"; + return new ElasticSearchDoc(it.getName().toString(), it, userName, false, null) + } + es.save(Type.table.name(), docs) + es.refresh() + es.delete( new MetacatContext("testUpdate", null, null, null, null), Type.table.name(), softDelete) + where: + catalogName | databaseName | tableName | noOfTables | softDelete + 'prodhive' | 'amajumdar' | 'part' | 10 | false + 'prodhive' | 'amajumdar' | 'part' | 0 | false + 'prodhive' | 'amajumdar' | 'part' | 1000 | false + 'prodhive' | 'amajumdar' | 'part' | 10 | true + 'prodhive' | 'amajumdar' | 'part' | 0 | true + } +} diff --git a/metacat-main/src/test/resources/etc/catalog/default.properties b/metacat-main/src/test/resources/etc/catalog/default.properties new file mode 100644 index 000000000..254845be1 --- /dev/null +++ b/metacat-main/src/test/resources/etc/catalog/default.properties @@ -0,0 +1,6 @@ +#test +#Sat Mar 19 12:48:46 PDT 2016 +connector.name=mysql +connection-url=jdbc\:mysql\://localhost\:62771?user\=test&password\=test +connection-password=test +connection-user=test diff --git a/metacat-main/src/test/resources/etc/catalog/example.properties b/metacat-main/src/test/resources/etc/catalog/example.properties new file mode 100644 index 000000000..88aeedd8b --- /dev/null +++ b/metacat-main/src/test/resources/etc/catalog/example.properties @@ -0,0 +1,2 @@ +connector.name=example-http +metadata-uri=http://s3.amazonaws.com/presto-example/v2/example-metadata.json diff --git a/metacat-main/src/test/resources/etc/catalog/hive-local.properties b/metacat-main/src/test/resources/etc/catalog/hive-local.properties new file mode 100644 index 000000000..9537647c1 --- /dev/null +++ b/metacat-main/src/test/resources/etc/catalog/hive-local.properties @@ -0,0 +1,5 @@ +connector.name=metacat-hive +hive.metastore.uri=thrift://localhost:9093 +node.environment=default +hive.allow-rename-table=true +hive.allow-drop-table=true diff --git a/metacat-main/src/test/resources/search/mapping/metacat.json b/metacat-main/src/test/resources/search/mapping/metacat.json new file mode 100644 index 000000000..bd5c84257 --- /dev/null +++ b/metacat-main/src/test/resources/search/mapping/metacat.json @@ -0,0 +1,307 @@ +{ + "settings": { + "analysis": { + "analyzer": { + "paths": { + "tokenizer": "path_hierarchy" + }, + "path": { + "type": "pattern", + "pattern": "/" + } + } + } + }, + "mappings": { + "_default_": { + "_timestamp" : { + "enabled" : true, + "store" : true + }, + "_all": { "enabled": true }, + "properties": { + "audit": { + "properties": { + "createdBy": { + "type": "string", + "index": "not_analyzed" + }, + "lastModifiedBy": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "serde": { + "properties": { + "inputFormat": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "outputFormat": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "owner": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "serializationLib": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "uri": { + "type": "string", + "index": "not_analyzed", + "fields": { + "tree": { + "type": "string", + "analyzer": "paths" + } + } + } + } + }, + "definitionMetadata": { + "properties": { + "job": { + "properties": { + "name": { + "type": "string", + "index": 
"not_analyzed" + } + } + }, + "owner": { + "properties": { + "name": { + "type": "string", + "index": "not_analyzed" + }, + "team": { + "type": "string", + "index": "not_analyzed" + }, + "userId": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "tags":{ + "type": "string", + "index": "not_analyzed" + }, + "data_hygiene": { + "properties": { + "delete_column": { + "type": "string", + "index": "not_analyzed" + }, + "delete_method": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "lifetime": { + "properties": { + "partitionedBy": { + "type": "string", + "index": "not_analyzed" + }, + "user": { + "type": "string", + "index": "not_analyzed" + } + } + } + } + }, + "dataMetadata":{ + "properties": { + "com.netflix.dse.mds.metric.TopK": { + "type": "object", + "enabled": false + }, + "com.netflix.dse.mds.metric.GenieJobId": { + "properties": { + "value": { + "type": "string", + "index": "not_analyzed" + } + } + } + } + }, + "name": { + "properties": { + "catalogName": { + "type": "string", + "index": "not_analyzed" + }, + "databaseName": { + "type": "string", + "index": "not_analyzed" + }, + "qualifiedName": { + "type": "string", + "index": "not_analyzed", + "fields": { + "tree": { + "type": "string", + "analyzer": "paths" + } + } + }, + "tableName": { + "type": "string", + "index": "not_analyzed" + }, + "viewName": { + "type": "string", + "index": "not_analyzed" + }, + "partitionName": { + "type": "string", + "index": "not_analyzed", + "fields": { + "keys": { + "type": "string", + "analyzer": "path" + } + } + } + } + }, + "user_": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "type": { + "type": "string", + "index": "not_analyzed" + }, + "refreshMarker_": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "table": { + "properties": { + "fields": { + "properties": { + "name": { + "type": "string", + "index": "not_analyzed" + }, + "type": { + "type": "string", + "index": "not_analyzed" + }, + "source_type": { + "type": "string", + "index": "not_analyzed" + } + } + } + } + }, + "mview": { + "properties": { + "fields": { + "properties": { + "name": { + "type": "string", + "index": "not_analyzed" + }, + "type": { + "type": "string", + "index": "not_analyzed" + } + } + } + } + }, + "partition": { + "properties": { + "audit": { + "properties": { + "createdBy": { + "type": "string", + "index": "not_analyzed" + }, + "lastModifiedBy": { + "type": "string", + "index": "not_analyzed" + } + } + } + } + }, + "metacat-log": { + "properties": { + "method": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "name": { + "type": "string", + "fields": { + "raw": { + "type": "string", + "index": "not_analyzed" + } + } + }, + "type": { + "type": "string", + "index": "not_analyzed" + }, + "data": { + "type": "string", + "index": "not_analyzed" + }, + "message": { + "type": "string", + "index": "not_analyzed" + }, + "details": { + "type": "string", + "index": "not_analyzed" + } + } + } + } +} \ No newline at end of file diff --git a/metacat-main/src/test/resources/sql/prepare-test.sql b/metacat-main/src/test/resources/sql/prepare-test.sql new file mode 100644 index 000000000..136b719a4 --- /dev/null +++ b/metacat-main/src/test/resources/sql/prepare-test.sql @@ -0,0 +1,6 @@ +CREATE SCHEMA if not exists example; +CREATE TABLE example.numbers(text varchar(20) primary key, value bigint); +INSERT INTO example.numbers(text, value) VALUES ('one', 1),('two', 
2),('three', 3),('ten', 10),('eleven', 11),('twelve', 12); +CREATE SCHEMA if not exists tpch; +CREATE TABLE tpch.orders(orderkey bigint primary key, custkey bigint); +CREATE TABLE tpch.lineitem(orderkey bigint primary key, partkey bigint); diff --git a/metacat-main/src/test/resources/usermetadata.properties b/metacat-main/src/test/resources/usermetadata.properties new file mode 100644 index 000000000..c3e44f793 --- /dev/null +++ b/metacat-main/src/test/resources/usermetadata.properties @@ -0,0 +1,9 @@ +#test +#Sat Mar 19 12:48:46 PDT 2016 +javax.jdo.option.password=test +connector.name=mysql +connection-url=jdbc\:mysql\://localhost\:62771?user\=test&password\=test +javax.jdo.option.url=jdbc\:mysql\://localhost\:62771?user\=test&password\=test +connection-password=test +javax.jdo.option.username=test +connection-user=test diff --git a/metacat-mysql-connector/build.gradle b/metacat-mysql-connector/build.gradle new file mode 100644 index 000000000..d8ad74e17 --- /dev/null +++ b/metacat-mysql-connector/build.gradle @@ -0,0 +1,5 @@ +dependencies { + compile project(':metacat-common-server') + + compile "com.facebook.presto:presto-mysql:${presto_version}" +} diff --git a/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlClient.java b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlClient.java new file mode 100644 index 000000000..b0fad9553 --- /dev/null +++ b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlClient.java @@ -0,0 +1,180 @@ +package com.netflix.metacat.plugin.mysql; + +import com.facebook.presto.plugin.ColumnDetailHandle; +import com.facebook.presto.plugin.jdbc.BaseJdbcClient; +import com.facebook.presto.plugin.jdbc.BaseJdbcConfig; +import com.facebook.presto.plugin.jdbc.JdbcConnectorId; +import com.facebook.presto.plugin.jdbc.JdbcTableHandle; +import com.facebook.presto.plugin.mysql.MySqlClient; +import com.facebook.presto.plugin.mysql.MySqlConfig; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.type.FloatType; +import com.facebook.presto.type.IntType; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.mysql.jdbc.Driver; +import com.netflix.metacat.common.util.DataSourceManager; + +import javax.inject.Inject; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED; +import static com.facebook.presto.type.FloatType.FLOAT; +import static com.facebook.presto.type.IntType.INT; +import static java.util.Locale.ENGLISH; + +public class MetacatMySqlClient extends BaseJdbcClient { + private static final Map METACAT_SQL_TYPES = ImmutableMap.builder() + .put(INT, IntType.TYPE) + .put(FLOAT, FloatType.TYPE) + .build(); + @Inject + public MetacatMySqlClient(JdbcConnectorId connectorId, BaseJdbcConfig config, MySqlConfig mySqlConfig) throws SQLException { + super(connectorId, config, "`", DataSourceManager.get().getDriver(connectorId.toString(), new Driver())); + 
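+        // "nullCatalogMeansCurrent" is a MySQL Connector/J property: left at its default (true),
+        // a null catalog argument to DatabaseMetaData calls means "current catalog only".
+        // Setting it to false makes those calls span all catalogs, which this client relies on
+        // since MySQL exposes its databases as JDBC catalogs (see getSchemaNames() below).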
connectionProperties.setProperty("nullCatalogMeansCurrent", "false"); + if (mySqlConfig.isAutoReconnect()) { + connectionProperties.setProperty("autoReconnect", String.valueOf(mySqlConfig.isAutoReconnect())); + connectionProperties.setProperty("maxReconnects", String.valueOf(mySqlConfig.getMaxReconnects())); + } + if (mySqlConfig.getConnectionTimeout() != null) { + connectionProperties.setProperty("connectTimeout", String.valueOf(mySqlConfig.getConnectionTimeout().toMillis())); + } + } + + @Override + protected ResultSet getTables(Connection connection, String schemaName, String tableName) throws SQLException { + // For Metacat's purposes a view and a table are the same + return connection.getMetaData().getTables(schemaName, null, tableName, new String[] { "TABLE", "VIEW" }); + } + + @Override + protected Type toPrestoType(int jdbcType) + { + switch (jdbcType) { + case Types.TINYINT: + case Types.SMALLINT: + case Types.INTEGER: + return INT; + case Types.FLOAT: + return FLOAT; + default: + return super.toPrestoType(jdbcType); + } + } + + protected String toSqlType(Type type) + { + String sqlType = METACAT_SQL_TYPES.get(type); + if (sqlType != null) { + return sqlType; + } else { + sqlType = super.toSqlType(type); + switch (sqlType) { + case "varchar": + return "mediumtext"; + case "varbinary": + return "mediumblob"; + case "time with timezone": + return "time"; + case "timestamp": + case "timestamp with timezone": + return "datetime"; + } + return sqlType; + } + } + + + public List getColumnsWithDetails(JdbcTableHandle tableHandle) + { + try (Connection connection = driver.connect(connectionUrl, connectionProperties)) { + DatabaseMetaData metadata = connection.getMetaData(); + try (ResultSet resultSet = metadata.getColumns(tableHandle.getCatalogName(), tableHandle.getSchemaName(), + tableHandle.getTableName(), null); + ResultSet indexSet = metadata.getIndexInfo(tableHandle.getCatalogName(), tableHandle.getSchemaName(), + tableHandle.getTableName(), false, true)) { + List columns = new ArrayList<>(); + Set indexColumns = Sets.newHashSet(); + while( indexSet.next()){ + String columnName = indexSet.getString("COLUMN_NAME"); + if( columnName != null) { + indexColumns.add(columnName); + } + } + boolean found = false; + while (resultSet.next()) { + found = true; + Type columnType = toPrestoType(resultSet.getInt("DATA_TYPE")); + + // skip unsupported column types + if (columnType != null) { + String columnName = resultSet.getString("COLUMN_NAME"); + String sourceType = resultSet.getString("TYPE_NAME"); + Integer size = resultSet.getInt("COLUMN_SIZE"); + Boolean isNullable = "yes".equalsIgnoreCase(resultSet.getString("IS_NULLABLE")); + String defaultValue = resultSet.getString("COLUMN_DEF"); + String comment = resultSet.getString("REMARKS"); + Boolean isIndexKey = indexColumns.contains(columnName); + columns.add(new ColumnDetailHandle(connectorId, columnName, columnType, false, comment, sourceType, size, isNullable, defaultValue, null, isIndexKey)); + } + } + if (!found) { + throw new TableNotFoundException(tableHandle.getSchemaTableName()); + } + if (columns.isEmpty()) { + throw new PrestoException(NOT_SUPPORTED, "Table has no supported column types: " + tableHandle.getSchemaTableName()); + } + return ImmutableList.copyOf(columns); + } + } + catch (SQLException e) { + throw Throwables.propagate(e); + } + } + + @Override + public Set getSchemaNames() + { + // for MySQL, we need to list catalogs instead of schemas + try (Connection connection = driver.connect(connectionUrl, connectionProperties); + 
ResultSet resultSet = connection.getMetaData().getCatalogs()) { + ImmutableSet.Builder schemaNames = ImmutableSet.builder(); + while (resultSet.next()) { + String schemaName = resultSet.getString("TABLE_CAT").toLowerCase(ENGLISH); + // skip internal schemas + if (!schemaName.equals("information_schema") && !schemaName.equals("mysql")) { + schemaNames.add(schemaName); + } + } + return schemaNames.build(); + } + catch (SQLException e) { + throw Throwables.propagate(e); + } + } + + @Override + protected SchemaTableName getSchemaTableName(ResultSet resultSet) + throws SQLException + { + // MySQL uses catalogs instead of schemas + return new SchemaTableName( + resultSet.getString("TABLE_CAT").toLowerCase(ENGLISH), + resultSet.getString("TABLE_NAME").toLowerCase(ENGLISH)); + + } +} diff --git a/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlClientModule.java b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlClientModule.java new file mode 100644 index 000000000..7f5cb6bdc --- /dev/null +++ b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlClientModule.java @@ -0,0 +1,21 @@ +package com.netflix.metacat.plugin.mysql; + +import com.facebook.presto.plugin.jdbc.BaseJdbcConfig; +import com.facebook.presto.plugin.jdbc.JdbcClient; +import com.facebook.presto.plugin.mysql.MySqlClientModule; +import com.facebook.presto.plugin.mysql.MySqlConfig; +import com.google.inject.Binder; +import com.google.inject.Scopes; + +import static io.airlift.configuration.ConfigBinder.configBinder; + +public class MetacatMySqlClientModule extends MySqlClientModule { + @Override + public void configure(Binder binder) { + binder.bind(JdbcClient.class).to(MetacatMySqlClient.class).in(Scopes.SINGLETON); + binder.bind(MySqlJdbcConnector.class).in(Scopes.SINGLETON); + binder.bind(MySqlJdbcMetadata.class).in(Scopes.SINGLETON); + configBinder(binder).bindConfig(BaseJdbcConfig.class); + configBinder(binder).bindConfig(MySqlConfig.class); + } +} diff --git a/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlPlugin.java b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlPlugin.java new file mode 100644 index 000000000..a8756f6c7 --- /dev/null +++ b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MetacatMySqlPlugin.java @@ -0,0 +1,44 @@ +package com.netflix.metacat.plugin.mysql; + +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.Plugin; +import com.facebook.presto.spi.type.Type; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.inject.Module; + +import java.util.List; +import java.util.Map; + +import static com.facebook.presto.type.FloatType.FLOAT; +import static com.facebook.presto.type.IntType.INT; +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.base.Preconditions.checkNotNull; + +public class MetacatMySqlPlugin implements Plugin { + private static final String NAME = "metacat-mysql"; + private static final Module MODULE = new MetacatMySqlClientModule(); + private Map optionalConfig = ImmutableMap.of(); + + @Override + public void setOptionalConfig(Map optionalConfig) + { + this.optionalConfig = ImmutableMap.copyOf(checkNotNull(optionalConfig, "optionalConfig is null")); + } + + @Override + public List getServices(Class type) + { + if (type == ConnectorFactory.class) { + return 
ImmutableList.of(type.cast(new MySqlJdbcConnectorFactory(NAME, MODULE, optionalConfig, getClassLoader()))); + } else if (type == Type.class){ + return ImmutableList.of(type.cast(FLOAT), type.cast(INT)); + } + return ImmutableList.of(); + } + + private static ClassLoader getClassLoader() + { + return firstNonNull(Thread.currentThread().getContextClassLoader(), MetacatMySqlPlugin.class.getClassLoader()); + } +} diff --git a/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcConnector.java b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcConnector.java new file mode 100644 index 000000000..7fe024143 --- /dev/null +++ b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcConnector.java @@ -0,0 +1,26 @@ +package com.netflix.metacat.plugin.mysql; + +import com.facebook.presto.plugin.jdbc.JdbcConnector; +import com.facebook.presto.plugin.jdbc.JdbcHandleResolver; +import com.facebook.presto.plugin.jdbc.JdbcRecordSetProvider; +import com.facebook.presto.plugin.jdbc.JdbcRecordSinkProvider; +import com.facebook.presto.plugin.jdbc.JdbcSplitManager; +import io.airlift.bootstrap.LifeCycleManager; + +import javax.inject.Inject; + +/** + * Created by amajumdar on 9/30/15. + */ +public class MySqlJdbcConnector extends JdbcConnector{ + @Inject + public MySqlJdbcConnector(LifeCycleManager lifeCycleManager, + MySqlJdbcMetadata jdbcMetadata, + JdbcSplitManager jdbcSplitManager, + JdbcRecordSetProvider jdbcRecordSetProvider, + JdbcHandleResolver jdbcHandleResolver, + JdbcRecordSinkProvider jdbcRecordSinkProvider) { + super(lifeCycleManager, jdbcMetadata, jdbcSplitManager, jdbcRecordSetProvider, jdbcHandleResolver, + jdbcRecordSinkProvider); + } +} diff --git a/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcConnectorFactory.java b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcConnectorFactory.java new file mode 100644 index 000000000..9f32e91ff --- /dev/null +++ b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcConnectorFactory.java @@ -0,0 +1,64 @@ +package com.netflix.metacat.plugin.mysql; + +import com.facebook.presto.plugin.jdbc.JdbcModule; +import com.facebook.presto.spi.Connector; +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.classloader.ThreadContextClassLoader; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableMap; +import com.google.inject.Injector; +import com.google.inject.Module; +import com.netflix.metacat.common.util.DataSourceManager; +import io.airlift.bootstrap.Bootstrap; + +import java.util.Map; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Strings.isNullOrEmpty; + +public class MySqlJdbcConnectorFactory + implements ConnectorFactory +{ + private final String name; + private final Module module; + private final Map optionalConfig; + private final ClassLoader classLoader; + + public MySqlJdbcConnectorFactory(String name, Module module, Map optionalConfig, ClassLoader classLoader) + { + checkArgument(!isNullOrEmpty(name), "name is null or empty"); + this.name = name; + this.module = checkNotNull(module, "module is null"); + this.optionalConfig = ImmutableMap.copyOf(checkNotNull(optionalConfig, "optionalConfig is null")); + this.classLoader = checkNotNull(classLoader, "classLoader is null"); + } + + @Override + 
public String getName() + { + return name; + } + + @Override + public Connector create(String connectorId, Map requiredConfig) + { + checkNotNull(requiredConfig, "requiredConfig is null"); + checkNotNull(optionalConfig, "optionalConfig is null"); + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { + DataSourceManager.get().load(connectorId, requiredConfig); + Bootstrap app = new Bootstrap(new JdbcModule(connectorId), module); + + Injector injector = app + .doNotInitializeLogging() + .setRequiredConfigurationProperties(requiredConfig) + .setOptionalConfigurationProperties(optionalConfig) + .initialize(); + + return injector.getInstance(MySqlJdbcConnector.class); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } +} diff --git a/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcMetadata.java b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcMetadata.java new file mode 100644 index 000000000..8e3e878a3 --- /dev/null +++ b/metacat-mysql-connector/src/main/java/com/netflix/metacat/plugin/mysql/MySqlJdbcMetadata.java @@ -0,0 +1,65 @@ +package com.netflix.metacat.plugin.mysql; + +import com.facebook.presto.plugin.ColumnDetailHandle; +import com.facebook.presto.plugin.jdbc.JdbcClient; +import com.facebook.presto.plugin.jdbc.JdbcConnectorId; +import com.facebook.presto.plugin.jdbc.JdbcMetadata; +import com.facebook.presto.plugin.jdbc.JdbcMetadataConfig; +import com.facebook.presto.plugin.jdbc.JdbcTableHandle; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import javax.inject.Inject; +import java.util.Map; + +import static com.facebook.presto.plugin.jdbc.Types.checkType; + +/** + * Created by amajumdar on 9/30/15. 
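+ * JdbcMetadata variant that builds table and column metadata from ColumnDetailHandle
+ * (source type, size, nullability, default value, index membership) via
+ * MetacatMySqlClient#getColumnsWithDetails instead of the plain JDBC column handles.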
+ */ +public class MySqlJdbcMetadata extends JdbcMetadata{ + private final MetacatMySqlClient jdbcClient; + @Inject + public MySqlJdbcMetadata(JdbcConnectorId connectorId, + JdbcClient jdbcClient, + JdbcMetadataConfig config) { + super(connectorId, jdbcClient, config); + this.jdbcClient = (MetacatMySqlClient) jdbcClient; + } + + @Override + public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table) + { + JdbcTableHandle handle = checkType(table, JdbcTableHandle.class, "tableHandle"); + + ImmutableList.Builder columnMetadata = ImmutableList.builder(); + for (ColumnDetailHandle column : jdbcClient.getColumnsWithDetails(handle)) { + columnMetadata.add(column.getColumnMetadata()); + } + return new ConnectorTableMetadata(handle.getSchemaTableName(), columnMetadata.build()); + } + + @Override + public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) + { + JdbcTableHandle jdbcTableHandle = checkType(tableHandle, JdbcTableHandle.class, "tableHandle"); + + ImmutableMap.Builder columnHandles = ImmutableMap.builder(); + for (ColumnDetailHandle column : jdbcClient.getColumnsWithDetails(jdbcTableHandle)) { + columnHandles.put(column.getColumnMetadata().getName(), column); + } + return columnHandles.build(); + } + + @Override + public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle) + { + checkType(tableHandle, JdbcTableHandle.class, "tableHandle"); + return checkType(columnHandle, ColumnDetailHandle.class, "columnHandle").getColumnMetadata(); + } +} diff --git a/metacat-mysql-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin b/metacat-mysql-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin new file mode 100644 index 000000000..907c8e2af --- /dev/null +++ b/metacat-mysql-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin @@ -0,0 +1 @@ +com.netflix.metacat.plugin.mysql.MetacatMySqlPlugin diff --git a/metacat-postgres-connector/build.gradle b/metacat-postgres-connector/build.gradle new file mode 100644 index 000000000..dfa1fc5de --- /dev/null +++ b/metacat-postgres-connector/build.gradle @@ -0,0 +1,5 @@ +dependencies { + compile project(':metacat-common-server') + + compile "com.facebook.presto:presto-postgresql:${presto_version}" +} diff --git a/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlClient.java b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlClient.java new file mode 100644 index 000000000..552ccc39d --- /dev/null +++ b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlClient.java @@ -0,0 +1,133 @@ +package com.netflix.metacat.plugin.postgresql; + +import com.facebook.presto.plugin.ColumnDetailHandle; +import com.facebook.presto.plugin.jdbc.BaseJdbcClient; +import com.facebook.presto.plugin.jdbc.BaseJdbcConfig; +import com.facebook.presto.plugin.jdbc.JdbcConnectorId; +import com.facebook.presto.plugin.jdbc.JdbcOutputTableHandle; +import com.facebook.presto.plugin.jdbc.JdbcTableHandle; +import com.facebook.presto.plugin.postgresql.PostgreSqlClient; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.type.FloatType; +import com.facebook.presto.type.IntType; +import com.google.common.base.Throwables; +import 
com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import com.netflix.metacat.common.util.DataSourceManager;
+import io.airlift.slice.Slice;
+import org.postgresql.Driver;
+
+import javax.inject.Inject;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED;
+import static com.facebook.presto.type.FloatType.FLOAT;
+import static com.facebook.presto.type.IntType.INT;
+
+public class MetacatPostgreSqlClient extends BaseJdbcClient {
+    private static final String SQL_GET_DIST_SORT_KEYS =
+        "select column, distkey, sortkey from pg_catalog.pg_table_def where schemaname=? and tablename=?";
+    private static final Map<Type, String> METACAT_SQL_TYPES = ImmutableMap.<Type, String>builder()
+        .put(INT, IntType.TYPE)
+        .put(FLOAT, FloatType.TYPE)
+        .build();
+
+    @Inject
+    public MetacatPostgreSqlClient(JdbcConnectorId connectorId, BaseJdbcConfig config) throws SQLException {
+        super(connectorId, config, "\"", DataSourceManager.get().getDriver(connectorId.toString(), new Driver()));
+    }
+
+    public List<ColumnDetailHandle> getColumnsWithDetails(JdbcTableHandle tableHandle)
+    {
+        try (Connection connection = driver.connect(connectionUrl, connectionProperties)) {
+            DatabaseMetaData metadata = connection.getMetaData();
+            try (ResultSet resultSet = metadata.getColumns(tableHandle.getCatalogName(), tableHandle.getSchemaName(),
+                tableHandle.getTableName(), null)) {
+                List<ColumnDetailHandle> columns = new ArrayList<>();
+                Map<String, Map.Entry<Boolean, Boolean>> distSortMap = getDistSortMap(tableHandle.getSchemaName(),
+                    tableHandle.getTableName());
+                boolean found = false;
+                while (resultSet.next()) {
+                    found = true;
+                    Type columnType = toPrestoType(resultSet.getInt("DATA_TYPE"));
+
+                    // skip unsupported column types
+                    if (columnType != null) {
+                        String columnName = resultSet.getString("COLUMN_NAME");
+                        String sourceType = resultSet.getString("TYPE_NAME");
+                        Integer size = resultSet.getInt("COLUMN_SIZE");
+                        Boolean isNullable = "yes".equalsIgnoreCase(resultSet.getString("IS_NULLABLE"));
+                        String defaultValue = resultSet.getString("COLUMN_DEF");
+                        String comment = resultSet.getString("REMARKS");
+                        Map.Entry<Boolean, Boolean> distSort = distSortMap.get(columnName);
+                        Boolean isPartitionKey = Boolean.FALSE;
+                        Boolean isSortKey = Boolean.FALSE;
+                        if (distSort != null) {
+                            isPartitionKey = distSort.getKey();
+                            isSortKey = distSort.getValue();
+                        }
+                        columns.add(new ColumnDetailHandle(connectorId, columnName, columnType, isPartitionKey,
+                            comment, sourceType, size, isNullable, defaultValue, isSortKey, null));
+                    }
+                }
+                if (!found) {
+                    throw new TableNotFoundException(tableHandle.getSchemaTableName());
+                }
+                if (columns.isEmpty()) {
+                    throw new PrestoException(NOT_SUPPORTED,
+                        "Table has no supported column types: " + tableHandle.getSchemaTableName());
+                }
+                return ImmutableList.copyOf(columns);
+            }
+        }
+        catch (SQLException e) {
+            throw Throwables.propagate(e);
+        }
+    }
+
+    @SuppressWarnings("JpaQueryApiInspection")
+    private Map<String, Map.Entry<Boolean, Boolean>> getDistSortMap(String schemaName, String tableName) {
+        Map<String, Map.Entry<Boolean, Boolean>> result = Maps.newHashMap();
+        try (Connection connection = driver.connect(connectionUrl, connectionProperties);
+            PreparedStatement stmt = connection.prepareStatement(SQL_GET_DIST_SORT_KEYS)) {
+            stmt.setString(1, schemaName);
+            stmt.setString(2, tableName);
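+            // pg_table_def is a Redshift catalog view; its result set might look like
+            // (illustrative values only):
+            //   column   | distkey | sortkey
+            //   orderkey | t       | 1
+            //   custkey  | f       | 0
+            // Each row becomes a (distkey, sortkey) pair keyed by column name below.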
try(ResultSet resultSet = stmt.executeQuery()){ + while( resultSet.next()){ + String columnName = resultSet.getString("column"); + Boolean distKey = resultSet.getBoolean("distkey"); + Boolean sortKey = resultSet.getBoolean("sortkey"); + result.put( columnName, new AbstractMap.SimpleImmutableEntry<>(distKey, sortKey)); + } + } + }catch (SQLException ignored) {} + return result; + } + + @Override + public void commitCreateTable(JdbcOutputTableHandle handle, Collection fragments) + { + // PostgreSQL does not allow qualifying the target of a rename + StringBuilder sql = new StringBuilder() + .append("ALTER TABLE ") + .append(quoted(handle.getCatalogName(), handle.getSchemaName(), handle.getTemporaryTableName())) + .append(" RENAME TO ") + .append(quoted(handle.getTableName())); + + try (Connection connection = getConnection(handle)) { + execute(connection, sql.toString()); + } + catch (SQLException e) { + throw Throwables.propagate(e); + } + } +} diff --git a/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlClientModule.java b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlClientModule.java new file mode 100644 index 000000000..313cc5a3c --- /dev/null +++ b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlClientModule.java @@ -0,0 +1,19 @@ +package com.netflix.metacat.plugin.postgresql; + +import com.facebook.presto.plugin.jdbc.BaseJdbcConfig; +import com.facebook.presto.plugin.jdbc.JdbcClient; +import com.facebook.presto.plugin.postgresql.PostgreSqlClientModule; +import com.google.inject.Binder; +import com.google.inject.Scopes; + +import static io.airlift.configuration.ConfigBinder.configBinder; + +public class MetacatPostgreSqlClientModule extends PostgreSqlClientModule { + @Override + public void configure(Binder binder) { + binder.bind(JdbcClient.class).to(MetacatPostgreSqlClient.class).in(Scopes.SINGLETON); + binder.bind(PostgreSqlJdbcConnector.class).in(Scopes.SINGLETON); + binder.bind(PostgreSqlJdbcMetadata.class).in(Scopes.SINGLETON); + configBinder(binder).bindConfig(BaseJdbcConfig.class); + } +} diff --git a/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlPlugin.java b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlPlugin.java new file mode 100644 index 000000000..0936f823f --- /dev/null +++ b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/MetacatPostgreSqlPlugin.java @@ -0,0 +1,44 @@ +package com.netflix.metacat.plugin.postgresql; + +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.Plugin; +import com.facebook.presto.spi.type.Type; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.inject.Module; + +import java.util.List; +import java.util.Map; + +import static com.facebook.presto.type.FloatType.FLOAT; +import static com.facebook.presto.type.IntType.INT; +import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.base.Preconditions.checkNotNull; + +public class MetacatPostgreSqlPlugin implements Plugin { + private static final String NAME = "metacat-postgresql"; + private static final Module MODULE = new MetacatPostgreSqlClientModule(); + private Map optionalConfig = ImmutableMap.of(); + + @Override + public void setOptionalConfig(Map optionalConfig) + { + this.optionalConfig = 
+
+    @Override
+    public <T> List<T> getServices(Class<T> type)
+    {
+        if (type == ConnectorFactory.class) {
+            return ImmutableList.of(type.cast(new PostgreSqlJdbcConnectorFactory(NAME, MODULE, optionalConfig, getClassLoader())));
+        } else if (type == Type.class){
+            return ImmutableList.of(type.cast(FLOAT), type.cast(INT));
+        }
+        return ImmutableList.of();
+    }
+
+    private static ClassLoader getClassLoader()
+    {
+        return firstNonNull(Thread.currentThread().getContextClassLoader(), MetacatPostgreSqlPlugin.class.getClassLoader());
+    }
+}
diff --git a/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcConnector.java b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcConnector.java
new file mode 100644
index 000000000..dce8f15d3
--- /dev/null
+++ b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcConnector.java
@@ -0,0 +1,26 @@
+package com.netflix.metacat.plugin.postgresql;
+
+import com.facebook.presto.plugin.jdbc.JdbcConnector;
+import com.facebook.presto.plugin.jdbc.JdbcHandleResolver;
+import com.facebook.presto.plugin.jdbc.JdbcRecordSetProvider;
+import com.facebook.presto.plugin.jdbc.JdbcRecordSinkProvider;
+import com.facebook.presto.plugin.jdbc.JdbcSplitManager;
+import io.airlift.bootstrap.LifeCycleManager;
+
+import javax.inject.Inject;
+
+/**
+ * Created by amajumdar on 9/30/15.
+ */
+public class PostgreSqlJdbcConnector extends JdbcConnector {
+    @Inject
+    public PostgreSqlJdbcConnector(LifeCycleManager lifeCycleManager,
+            PostgreSqlJdbcMetadata jdbcMetadata,
+            JdbcSplitManager jdbcSplitManager,
+            JdbcRecordSetProvider jdbcRecordSetProvider,
+            JdbcHandleResolver jdbcHandleResolver,
+            JdbcRecordSinkProvider jdbcRecordSinkProvider) {
+        super(lifeCycleManager, jdbcMetadata, jdbcSplitManager, jdbcRecordSetProvider, jdbcHandleResolver,
+                jdbcRecordSinkProvider);
+    }
+}
diff --git a/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcConnectorFactory.java b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcConnectorFactory.java
new file mode 100644
index 000000000..c48dee53e
--- /dev/null
+++ b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcConnectorFactory.java
@@ -0,0 +1,65 @@
+package com.netflix.metacat.plugin.postgresql;
+
+import com.facebook.presto.plugin.jdbc.JdbcModule;
+import com.facebook.presto.spi.Connector;
+import com.facebook.presto.spi.ConnectorFactory;
+import com.facebook.presto.spi.classloader.ThreadContextClassLoader;
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableMap;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.netflix.metacat.common.util.DataSourceManager;
+import io.airlift.bootstrap.Bootstrap;
+
+import java.util.Map;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Strings.isNullOrEmpty;
+
+public class PostgreSqlJdbcConnectorFactory
+        implements ConnectorFactory
+{
+    private final String name;
+    private final Module module;
+    private final Map<String, String> optionalConfig;
+    private final ClassLoader classLoader;
+
+    public PostgreSqlJdbcConnectorFactory(String name, Module module, Map<String, String> optionalConfig,
+            ClassLoader classLoader)
+    {
checkArgument(!isNullOrEmpty(name), "name is null or empty"); + this.name = name; + this.module = checkNotNull(module, "module is null"); + this.optionalConfig = ImmutableMap.copyOf(checkNotNull(optionalConfig, "optionalConfig is null")); + this.classLoader = checkNotNull(classLoader, "classLoader is null"); + } + + @Override + public String getName() + { + return name; + } + + @Override + public Connector create(String connectorId, Map requiredConfig) + { + checkNotNull(requiredConfig, "requiredConfig is null"); + checkNotNull(optionalConfig, "optionalConfig is null"); + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { + DataSourceManager.get().load(connectorId, requiredConfig); + Bootstrap app = new Bootstrap(new JdbcModule(connectorId), module); + + Injector injector = app + .doNotInitializeLogging() + .setRequiredConfigurationProperties(requiredConfig) + .setOptionalConfigurationProperties(optionalConfig) + .initialize(); + + return injector.getInstance(PostgreSqlJdbcConnector.class); + } + catch (Exception e) { + throw Throwables.propagate(e); + } + } +} diff --git a/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcMetadata.java b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcMetadata.java new file mode 100644 index 000000000..a9460f4fb --- /dev/null +++ b/metacat-postgres-connector/src/main/java/com/netflix/metacat/plugin/postgresql/PostgreSqlJdbcMetadata.java @@ -0,0 +1,65 @@ +package com.netflix.metacat.plugin.postgresql; + +import com.facebook.presto.plugin.ColumnDetailHandle; +import com.facebook.presto.plugin.jdbc.JdbcClient; +import com.facebook.presto.plugin.jdbc.JdbcConnectorId; +import com.facebook.presto.plugin.jdbc.JdbcMetadata; +import com.facebook.presto.plugin.jdbc.JdbcMetadataConfig; +import com.facebook.presto.plugin.jdbc.JdbcTableHandle; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import javax.inject.Inject; +import java.util.Map; + +import static com.facebook.presto.plugin.jdbc.Types.checkType; + +/** + * Created by amajumdar on 9/30/15. 
+ */ +public class PostgreSqlJdbcMetadata extends JdbcMetadata{ + private final MetacatPostgreSqlClient jdbcClient; + @Inject + public PostgreSqlJdbcMetadata(JdbcConnectorId connectorId, + JdbcClient jdbcClient, + JdbcMetadataConfig config) { + super(connectorId, jdbcClient, config); + this.jdbcClient = (MetacatPostgreSqlClient) jdbcClient; + } + + @Override + public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table) + { + JdbcTableHandle handle = checkType(table, JdbcTableHandle.class, "tableHandle"); + + ImmutableList.Builder columnMetadata = ImmutableList.builder(); + for (ColumnDetailHandle column : jdbcClient.getColumnsWithDetails(handle)) { + columnMetadata.add(column.getColumnMetadata()); + } + return new ConnectorTableMetadata(handle.getSchemaTableName(), columnMetadata.build()); + } + + @Override + public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) + { + JdbcTableHandle jdbcTableHandle = checkType(tableHandle, JdbcTableHandle.class, "tableHandle"); + + ImmutableMap.Builder columnHandles = ImmutableMap.builder(); + for (ColumnDetailHandle column : jdbcClient.getColumnsWithDetails(jdbcTableHandle)) { + columnHandles.put(column.getColumnMetadata().getName(), column); + } + return columnHandles.build(); + } + + @Override + public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle) + { + checkType(tableHandle, JdbcTableHandle.class, "tableHandle"); + return checkType(columnHandle, ColumnDetailHandle.class, "columnHandle").getColumnMetadata(); + } +} diff --git a/metacat-postgres-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin b/metacat-postgres-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin new file mode 100644 index 000000000..5b9cf712d --- /dev/null +++ b/metacat-postgres-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin @@ -0,0 +1 @@ +com.netflix.metacat.plugin.postgresql.MetacatPostgreSqlPlugin diff --git a/metacat-s3-connector/build.gradle b/metacat-s3-connector/build.gradle new file mode 100644 index 000000000..e606f3e1b --- /dev/null +++ b/metacat-s3-connector/build.gradle @@ -0,0 +1,15 @@ +dependencies { + compile project(':metacat-converters') + compile 'org.hibernate:hibernate-entitymanager:5.0.0.Final' + compile 'mysql:mysql-connector-java:5.1.35' + compile 'joda-time:joda-time:2.8.2' + compile "com.google.guava:guava:${guava_version}" + compile "com.google.inject:guice:${guice_version}" + compile "com.google.inject.extensions:guice-persist:${guice_version}" + compile "com.google.inject.extensions:guice-multibindings:${guice_version}" + compile "com.google.inject.extensions:guice-servlet:${guice_version}" + compile "com.facebook.presto:presto-spi:${presto_version}" + compile "com.facebook.presto:presto-main:${presto_version}" + compile "com.facebook.presto:presto-hive-hadoop2:${presto_version}" + testCompile 'io.airlift:testing-mysql-server:0.1' +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3ConnectorFactory.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3ConnectorFactory.java new file mode 100644 index 000000000..24e6583a9 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3ConnectorFactory.java @@ -0,0 +1,142 @@ +package com.netflix.metacat.s3.connector; + +import com.facebook.presto.hive.HiveClientModule; +import 
com.facebook.presto.hive.HiveConnector; +import com.facebook.presto.hive.HiveConnectorFactory; +import com.facebook.presto.hive.HiveSessionProperties; +import com.facebook.presto.hive.HiveTableProperties; +import com.facebook.presto.hive.NoSecurityModule; +import com.facebook.presto.hive.ReadOnlySecurityModule; +import com.facebook.presto.hive.RebindSafeMBeanServer; +import com.facebook.presto.hive.SecurityConfig; +import com.facebook.presto.hive.SqlStandardSecurityModule; +import com.facebook.presto.spi.Connector; +import com.facebook.presto.spi.ConnectorHandleResolver; +import com.facebook.presto.spi.ConnectorMetadata; +import com.facebook.presto.spi.ConnectorPageSourceProvider; +import com.facebook.presto.spi.ConnectorRecordSinkProvider; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.classloader.ClassLoaderSafeConnectorHandleResolver; +import com.facebook.presto.spi.classloader.ClassLoaderSafeConnectorPageSourceProvider; +import com.facebook.presto.spi.classloader.ClassLoaderSafeConnectorRecordSinkProvider; +import com.facebook.presto.spi.classloader.ThreadContextClassLoader; +import com.facebook.presto.spi.security.ConnectorAccessControl; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableSet; +import com.google.inject.Injector; +import com.google.inject.Module; +import com.google.inject.persist.PersistService; +import com.google.inject.persist.jpa.JpaPersistModule; +import com.google.inject.util.Modules; +import com.netflix.metacat.common.server.CommonModule; +import com.netflix.metacat.common.util.DataSourceManager; +import com.netflix.metacat.converters.ConvertersModule; +import io.airlift.bootstrap.Bootstrap; +import io.airlift.bootstrap.LifeCycleManager; +import io.airlift.json.JsonModule; +import io.airlift.node.NodeModule; +import org.weakref.jmx.guice.MBeanModule; + +import javax.management.MBeanServer; +import java.lang.management.ManagementFactory; +import java.util.Map; +import java.util.Properties; + +import static com.facebook.presto.hive.ConditionalModule.installModuleIf; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 10/9/15. 
+ */ +public class S3ConnectorFactory extends HiveConnectorFactory { + private static final String NAME = "metacat-s3"; + private final Map optionalConfig; + private final ClassLoader classLoader; + private final TypeManager typeManager; + + public S3ConnectorFactory(TypeManager typeManager, Map optionalConfig, ClassLoader classLoader) { + super(NAME, optionalConfig, classLoader, null, typeManager); + this.optionalConfig = optionalConfig; + this.classLoader = classLoader; + this.typeManager = typeManager; + } + + @Override + public Connector create(String connectorId, Map config) + { + checkNotNull(config, "config is null"); + + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { + HiveClientModule hiveClientModule = new HiveClientModule(connectorId, null, typeManager); + Module module = Modules.override(hiveClientModule).with(new S3Module()); + + //JPA module + Properties props = new Properties(); + props.putAll(config); + props.put("hibernate.connection.datasource", DataSourceManager.get().load( connectorId, config).get( connectorId)); + Module jpaModule = new JpaPersistModule("s3").properties(props); + + Bootstrap app = new Bootstrap( + new NodeModule(), + new MBeanModule(), + new JsonModule(), + module, + installModuleIf( + SecurityConfig.class, + security -> "none".equalsIgnoreCase(security.getSecuritySystem()), + new NoSecurityModule()), + installModuleIf( + SecurityConfig.class, + security -> "read-only".equalsIgnoreCase(security.getSecuritySystem()), + new ReadOnlySecurityModule()), + installModuleIf( + SecurityConfig.class, + security -> "sql-standard".equalsIgnoreCase(security.getSecuritySystem()), + new SqlStandardSecurityModule()), + binder -> { + MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer(); + binder.bind(MBeanServer.class).toInstance(new RebindSafeMBeanServer(platformMBeanServer)); + }, + jpaModule, + new CommonModule(), + new ConvertersModule() + ); + + Injector injector = app + .doNotInitializeLogging() + .setRequiredConfigurationProperties(config) + .setOptionalConfigurationProperties(optionalConfig) + .initialize(); + + PersistService persistService = injector.getInstance(PersistService.class); + persistService.start(); + + LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class); + ConnectorMetadata metadata = injector.getInstance(ConnectorMetadata.class); + ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class); + ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class); + ConnectorRecordSinkProvider recordSinkProvider = injector.getInstance(ConnectorRecordSinkProvider.class); + ConnectorHandleResolver handleResolver = injector.getInstance(ConnectorHandleResolver.class); + HiveSessionProperties hiveSessionProperties = injector.getInstance(HiveSessionProperties.class); + HiveTableProperties hiveTableProperties = injector.getInstance(HiveTableProperties.class); + ConnectorAccessControl accessControl = injector.getInstance(ConnectorAccessControl.class); + + return new HiveConnector( + lifeCycleManager, + metadata, + splitManager, + new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader), + new ClassLoaderSafeConnectorRecordSinkProvider(recordSinkProvider, classLoader), + new ClassLoaderSafeConnectorHandleResolver(handleResolver, classLoader), + ImmutableSet.of(), + hiveSessionProperties.getSessionProperties(), + hiveTableProperties.getTableProperties(), + accessControl); + } + 
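+        // A note on the wiring above: Modules.override(hiveClientModule).with(new S3Module())
+        // keeps every Hive binding except the ones S3Module re-declares, so e.g.
+        // ConnectorMetadata resolves to S3DetailMetadata instead of the Hive
+        // implementation. The JpaPersistModule("s3") requires a matching "s3"
+        // persistence unit on the classpath; its datasource is supplied
+        // programmatically from DataSourceManager rather than from persistence.xml.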
catch (Exception e) { + throw Throwables.propagate(e); + } + } + +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3DetailMetadata.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3DetailMetadata.java new file mode 100644 index 000000000..17d26c235 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3DetailMetadata.java @@ -0,0 +1,622 @@ +package com.netflix.metacat.s3.connector; + +import com.facebook.presto.exception.SchemaAlreadyExistsException; +import com.facebook.presto.hive.HdfsEnvironment; +import com.facebook.presto.hive.HiveColumnHandle; +import com.facebook.presto.hive.HiveConnectorId; +import com.facebook.presto.hive.HiveOutputTableHandle; +import com.facebook.presto.hive.HiveStorageFormat; +import com.facebook.presto.hive.HiveTableHandle; +import com.facebook.presto.hive.HiveTableProperties; +import com.facebook.presto.hive.HiveType; +import com.facebook.presto.hive.TableAlreadyExistsException; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorDetailMetadata; +import com.facebook.presto.spi.ConnectorInsertTableHandle; +import com.facebook.presto.spi.ConnectorOutputTableHandle; +import com.facebook.presto.spi.ConnectorSchemaMetadata; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.facebook.presto.spi.PrestoException; +import com.facebook.presto.spi.SchemaNotFoundException; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.SchemaTablePrefix; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.inject.persist.Transactional; +import com.netflix.metacat.s3.connector.dao.DatabaseDao; +import com.netflix.metacat.s3.connector.dao.FieldDao; +import com.netflix.metacat.s3.connector.dao.SourceDao; +import com.netflix.metacat.s3.connector.dao.TableDao; +import com.netflix.metacat.s3.connector.model.Database; +import com.netflix.metacat.s3.connector.model.Field; +import com.netflix.metacat.s3.connector.model.Info; +import com.netflix.metacat.s3.connector.model.Location; +import com.netflix.metacat.s3.connector.model.Schema; +import com.netflix.metacat.s3.connector.model.Table; +import com.netflix.metacat.s3.connector.util.ConverterUtil; +import io.airlift.slice.Slice; +import org.apache.hadoop.fs.Path; +import org.weakref.jmx.internal.guava.base.StandardSystemProperty; + +import javax.inject.Inject; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +import static com.facebook.presto.hive.HiveColumnHandle.SAMPLE_WEIGHT_COLUMN_NAME; +import static com.facebook.presto.hive.HiveErrorCode.HIVE_DATABASE_LOCATION_ERROR; +import static com.facebook.presto.hive.HiveErrorCode.HIVE_FILESYSTEM_ERROR; +import static com.facebook.presto.hive.HiveErrorCode.HIVE_PATH_ALREADY_EXISTS; +import static com.facebook.presto.hive.HiveUtil.schemaTableName; +import static 
com.facebook.presto.hive.util.Types.checkType; +import static com.facebook.presto.spi.StandardErrorCode.NOT_SUPPORTED; +import static com.facebook.presto.spi.type.BigintType.BIGINT; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Strings.isNullOrEmpty; +import static java.lang.String.format; + +/** + * Created by amajumdar on 10/9/15. + */ +@Transactional +public class S3DetailMetadata implements ConnectorDetailMetadata { + @Inject + SourceDao sourceDao; + @Inject + DatabaseDao databaseDao; + @Inject + TableDao tableDao; + @Inject + FieldDao fieldDao; + @Inject + HiveConnectorId connectorId; + @Inject + ConverterUtil converterUtil; + @Inject + private TypeManager typeManager; + @Inject + HdfsEnvironment hdfsEnvironment; + @Override + public void createSchema(ConnectorSession session, ConnectorSchemaMetadata schema) { + String schemaName = schema.getSchemaName(); + checkNotNull(schemaName, "Schema name is null"); + if( databaseDao.getBySourceDatabaseName(connectorId.toString(), schemaName) != null){ + throw new SchemaAlreadyExistsException(schemaName); + } + Database database = new Database(); + database.setName(schemaName); + database.setSource(sourceDao.getByName(connectorId.toString())); + databaseDao.save(database); + } + + @Override + public void updateSchema(ConnectorSession session, ConnectorSchemaMetadata schema) { + // no op + } + + @Override + public void dropSchema(ConnectorSession session, String schemaName) { + checkNotNull(schemaName, "Schema name is null"); + Database database = databaseDao.getByName(schemaName); + if( database == null){ + throw new SchemaNotFoundException(schemaName); + } + databaseDao.delete(database); + } + + @Override + public ConnectorSchemaMetadata getSchema(ConnectorSession session, String schemaName) { + return new ConnectorSchemaMetadata(schemaName); + } + + @Override + public ConnectorTableHandle alterTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) { + SchemaTableName tableName = tableMetadata.getTable(); + Table table = tableDao.getBySourceDatabaseTableName( connectorId.toString(), tableName.getSchemaName(), tableName.getTableName()); + if( table == null){ + throw new TableNotFoundException(tableName); + } + //we can update the fields, the uri, or the full serde + Location newLocation = converterUtil.toLocation(tableMetadata); + Location location = table.getLocation(); + if( location == null){ + location = new Location(); + location.setTable(table); + table.setLocation(location); + } + if( newLocation.getUri() != null) { + location.setUri(newLocation.getUri()); + } + Info newInfo = newLocation.getInfo(); + if( newInfo!= null){ + Info info = location.getInfo(); + if( info == null){ + location.setInfo(newInfo); + newInfo.setLocation(location); + } else { + if( newInfo.getInputFormat() != null){ + info.setInputFormat(newInfo.getInputFormat()); + } + if( newInfo.getOutputFormat() != null){ + info.setOutputFormat(newInfo.getOutputFormat()); + } + if( newInfo.getOwner() != null){ + info.setOwner(newInfo.getOwner()); + } + if( newInfo.getSerializationLib() != null){ + info.setSerializationLib(newInfo.getSerializationLib()); + } + if( newInfo.getParameters() != null && !newInfo.getParameters().isEmpty()){ + info.setParameters( newInfo.getParameters()); + } + } + } + Schema newSchema = newLocation.getSchema(); + if( newSchema != null){ + List newFields = newSchema.getFields(); + if( newFields != null && 
!newFields.isEmpty()){ + Schema schema = location.getSchema(); + if( schema == null){ + location.setSchema(newSchema); + newSchema.setLocation(location); + } else { + List fields = schema.getFields(); + if( fields.isEmpty()){ + newFields.forEach(field -> { + field.setSchema(schema); + fields.add(field); + }); + } else { + for(int i=0; i listTableMetadatas(ConnectorSession session, String schemaName, + List tableNames) { + List
tables = tableDao.getBySourceDatabaseTableNames(connectorId.toString(), schemaName, tableNames); + return tables.stream() + .map(table -> + new ConnectorTableDetailMetadata(new SchemaTableName(schemaName, table.getName()) + , converterUtil.toColumnMetadatas(table), converterUtil.getOwner(table) + , converterUtil.toStorageInfo(table), null, converterUtil.toAuditInfo(table)) + ) + .collect(Collectors.toList()); + } + + @Override + public List listSchemaNames(ConnectorSession session) { + List databases = sourceDao.getByName(connectorId.toString(), false).getDatabases(); + return databases.stream().map(database -> database.getName().toLowerCase(Locale.ENGLISH)).collect(Collectors.toList()); + } + + @Override + public ConnectorTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) { + return new HiveTableHandle(connectorId.toString(), tableName.getSchemaName(), tableName.getTableName()); + } + + @Override + public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle tableHandle) { + SchemaTableName schemaTableName = schemaTableName(tableHandle); + return getTableMetadata(schemaTableName); + } + + @Override + public List listTables(ConnectorSession session, String schemaNameOrNull) { + ImmutableList.Builder tableNames = ImmutableList.builder(); + for (String schemaName : listSchemas(session, schemaNameOrNull)) { + Database database = databaseDao.getBySourceDatabaseName(connectorId.toString(), schemaName); + if( database != null ){ + for (Table table : database.getTables()) { + tableNames.add(new SchemaTableName(schemaName, table.getName().toLowerCase(Locale.ENGLISH))); + } + } + } + return tableNames.build(); + } + + private List listSchemas(ConnectorSession session, String schemaNameOrNull) + { + if (schemaNameOrNull == null) { + return listSchemaNames(session); + } + return ImmutableList.of(schemaNameOrNull); + } + + @Override + public ColumnHandle getSampleWeightColumnHandle(ConnectorSession session, ConnectorTableHandle tableHandle) { + SchemaTableName schemaTableName = schemaTableName(tableHandle); + Table table = tableDao.getBySourceDatabaseTableName(connectorId.toString(), schemaTableName.getSchemaName(), + schemaTableName.getTableName()); + if ( table == null) { + throw new TableNotFoundException(schemaTableName); + } + ImmutableMap.Builder columnHandles = ImmutableMap.builder(); + for ( Field field : getFields(table)) { + if( SAMPLE_WEIGHT_COLUMN_NAME.equals(field.getName())){ + String type = field.getType(); + Type prestoType = converterUtil.toType(type); + HiveType hiveType = HiveType.toHiveType(prestoType); + return new HiveColumnHandle(connectorId.toString(), field.getName(), field.getPos() + , hiveType, prestoType.getTypeSignature(), field.getPos(), field.isPartitionKey()); + } + } + return null; + } + + @Override + public boolean canCreateSampledTables(ConnectorSession session) { + return false; + } + + @Override + public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) { + SchemaTableName schemaTableName = schemaTableName(tableHandle); + Table table = tableDao.getBySourceDatabaseTableName(connectorId.toString(), schemaTableName.getSchemaName(), + schemaTableName.getTableName()); + if ( table == null) { + throw new TableNotFoundException(schemaTableName); + } + ImmutableMap.Builder columnHandles = ImmutableMap.builder(); + for ( Field field : getFields(table)) { + String type = field.getType(); + Type prestoType = converterUtil.toType(type); + HiveType hiveType = 
HiveType.toHiveType(prestoType);
+            columnHandles.put(field.getName(), new HiveColumnHandle(connectorId.toString(), field.getName(), field.getPos()
+                    , hiveType, prestoType.getTypeSignature(), field.getPos(), field.isPartitionKey()));
+        }
+        return columnHandles.build();
+    }
+
+    private List<Field> getFields(Table table){
+        List<Field> result = Lists.newArrayList();
+        Location location = table.getLocation();
+        if( location != null){
+            Schema schema = location.getSchema();
+            if( schema != null && schema.getFields() != null){
+                result = schema.getFields();
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle,
+            ColumnHandle columnHandle) {
+        checkType(tableHandle, HiveTableHandle.class, "tableHandle");
+        return checkType(columnHandle, HiveColumnHandle.class, "columnHandle").getColumnMetadata(typeManager);
+    }
+
+    @Override
+    public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session,
+            SchemaTablePrefix prefix) {
+        checkNotNull(prefix, "prefix is null");
+        ImmutableMap.Builder<SchemaTableName, List<ColumnMetadata>> columns = ImmutableMap.builder();
+        for (SchemaTableName tableName : listTables(session, prefix)) {
+            try {
+                columns.put(tableName, getTableMetadata(tableName).getColumns());
+            }
+            catch (TableNotFoundException e) {
+                // table disappeared during listing operation
+            }
+        }
+        return columns.build();
+    }
+
+    private ConnectorTableMetadata getTableMetadata(SchemaTableName schemaTableName) {
+        Table table = tableDao.getBySourceDatabaseTableName(connectorId.toString(), schemaTableName.getSchemaName(),
+                schemaTableName.getTableName());
+        if( table == null){
+            throw new TableNotFoundException(schemaTableName);
+        }
+        return new ConnectorTableDetailMetadata( new SchemaTableName(schemaTableName.getSchemaName(), table.getName())
+                , converterUtil.toColumnMetadatas(table),converterUtil.getOwner(table)
+                , converterUtil.toStorageInfo(table), null, converterUtil.toAuditInfo(table));
+    }
+
+    private List<SchemaTableName> listTables(ConnectorSession session, SchemaTablePrefix prefix)
+    {
+        if (prefix.getSchemaName() == null || prefix.getTableName() == null) {
+            return listTables(session, prefix.getSchemaName());
+        }
+        return ImmutableList.of(new SchemaTableName(prefix.getSchemaName(), prefix.getTableName()));
+    }
+
+    @Override
+    public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
+        checkArgument(!Strings.isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty");
+
+        SchemaTableName schemaTableName = tableMetadata.getTable();
+        if( tableDao.getBySourceDatabaseTableName(connectorId.toString(), schemaTableName.getSchemaName(),
+                schemaTableName.getTableName()) != null){
+            throw new TableAlreadyExistsException(schemaTableName);
+        }
+        Database database = databaseDao.getBySourceDatabaseName(connectorId.toString(), schemaTableName.getSchemaName());
+        Table table = new Table();
+        table.setName(schemaTableName.getTableName());
+        table.setDatabase(database);
+        Location location = converterUtil.toLocation(tableMetadata);
+        if( location != null) {
+            location.setTable(table);
+            table.setLocation(location);
+        }
+        tableDao.save(table);
+    }
+
+    @Override
+    public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle) {
+        HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
+        Table table = tableDao.getBySourceDatabaseTableName( connectorId.toString(), hiveTableHandle.getSchemaName(), hiveTableHandle.getTableName());
+        if( table == null){
+            throw new TableNotFoundException(new
SchemaTableName(hiveTableHandle.getSchemaName(), hiveTableHandle.getTableName())); + } + tableDao.delete(table); + } + + @Override + public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTableName) { + HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; + Table table = tableDao.getBySourceDatabaseTableName(connectorId.toString(), hiveTableHandle.getSchemaName(), + hiveTableHandle.getTableName()); + if( table == null){ + throw new TableNotFoundException(new SchemaTableName(hiveTableHandle.getSchemaName(), hiveTableHandle.getTableName())); + } + Table newTable = tableDao.getBySourceDatabaseTableName(connectorId.toString(), newTableName.getSchemaName(), + newTableName.getTableName()); + if( newTable == null){ + table.setName(newTableName.getTableName()); + tableDao.save(table); + } else { + throw new TableAlreadyExistsException(newTableName, "Table with new name already exists"); + } + } + + @Override + public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) { + + checkArgument(!Strings.isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty"); + + HiveStorageFormat hiveStorageFormat = HiveTableProperties.getHiveStorageFormat(tableMetadata.getProperties()); + ImmutableList.Builder columnNames = ImmutableList.builder(); + ImmutableList.Builder columnTypes = ImmutableList.builder(); + + // get the root directory for the database + SchemaTableName schemaTableName = tableMetadata.getTable(); + String schemaName = schemaTableName.getSchemaName(); + String tableName = schemaTableName.getTableName(); + + buildColumnInfo(tableMetadata, columnNames, columnTypes); + + Path targetPath = getTargetPath(schemaName, tableName, schemaTableName); + + + // use a per-user temporary directory to avoid permission problems + // TODO: this should use Hadoop UserGroupInformation + String temporaryPrefix = "/tmp/presto-" + StandardSystemProperty.USER_NAME.value(); + + // create a temporary directory on the same filesystem + Path temporaryRoot = new Path(targetPath, temporaryPrefix); + Path temporaryPath = new Path(temporaryRoot, UUID.randomUUID().toString()); + createDirectories(temporaryPath); + + return new HiveOutputTableHandle( + connectorId.toString(), + schemaName, + tableName, + columnNames.build(), + columnTypes.build(), + tableMetadata.getOwner(), + targetPath.toString(), + temporaryPath.toString(), + hiveStorageFormat); + } + + private Path getTargetPath(String schemaName, String tableName, SchemaTableName schemaTableName) + { + String location = sourceDao.getByName(connectorId.toString()).getThriftUri(); + if (isNullOrEmpty(location)) { + throw new PrestoException(HIVE_DATABASE_LOCATION_ERROR, format("Database '%s' location is not set", schemaName)); + } + + Path databasePath = new Path(location); + if (!pathExists(databasePath)) { + throw new PrestoException(HIVE_DATABASE_LOCATION_ERROR, format("Database '%s' location does not exist: %s", schemaName, databasePath)); + } + if (!isDirectory(databasePath)) { + throw new PrestoException(HIVE_DATABASE_LOCATION_ERROR, format("Database '%s' location is not a directory: %s", schemaName, databasePath)); + } + + // verify the target directory for the table + Path targetPath = new Path(databasePath, tableName); + if (pathExists(targetPath)) { + throw new PrestoException(HIVE_PATH_ALREADY_EXISTS, format("Target directory for table '%s' already exists: %s", schemaTableName, targetPath)); + } + return targetPath; + } + + private 
boolean pathExists(Path path)
+    {
+        try {
+            return hdfsEnvironment.getFileSystem(path).exists(path);
+        }
+        catch (IOException e) {
+            throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed checking path: " + path, e);
+        }
+    }
+
+    private boolean isDirectory(Path path)
+    {
+        try {
+            return hdfsEnvironment.getFileSystem(path).isDirectory(path);
+        }
+        catch (IOException e) {
+            throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed checking path: " + path, e);
+        }
+    }
+
+    private void createDirectories(Path path)
+    {
+        try {
+            if (!hdfsEnvironment.getFileSystem(path).mkdirs(path)) {
+                throw new IOException("mkdirs returned false");
+            }
+        }
+        catch (IOException e) {
+            throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed to create directory: " + path, e);
+        }
+    }
+
+    private static void buildColumnInfo(ConnectorTableMetadata tableMetadata, ImmutableList.Builder<String> names, ImmutableList.Builder<Type> types)
+    {
+        for (ColumnMetadata column : tableMetadata.getColumns()) {
+            names.add(column.getName());
+            types.add(column.getType());
+        }
+
+        if (tableMetadata.isSampled()) {
+            names.add(SAMPLE_WEIGHT_COLUMN_NAME);
+            types.add(BIGINT);
+        }
+    }
+
+    @Override
+    public void commitCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle,
+            Collection<Slice> fragments) {
+        HiveOutputTableHandle handle = checkType(tableHandle, HiveOutputTableHandle.class, "tableHandle");
+
+        // verify no one raced us to create the target directory
+        Path targetPath = new Path(handle.getTargetPath());
+
+        // rename if using a temporary directory
+        if (handle.hasTemporaryPath()) {
+            if (pathExists(targetPath)) {
+                SchemaTableName table = new SchemaTableName(handle.getSchemaName(), handle.getTableName());
+                throw new PrestoException(HIVE_PATH_ALREADY_EXISTS, format("Unable to commit creation of table '%s': target directory already exists: %s", table, targetPath));
+            }
+            // rename the temporary directory to the target
+            rename(new Path(handle.getTemporaryPath()), targetPath);
+        }
+
+        // create the table in the metastore
+        List<String> types = handle.getColumnTypes().stream()
+                .map(HiveType::toHiveType)
+                .map(HiveType::getHiveTypeName)
+                .collect(Collectors.toList());
+
+        boolean sampled = false;
+        ImmutableList.Builder<Field> columns = ImmutableList.builder();
+        for (int i = 0; i < handle.getColumnNames().size(); i++) {
+            String name = handle.getColumnNames().get(i);
+            String type = types.get(i);
+            Field field = new Field();
+            field.setName(name);
+            field.setPos(i);
+            field.setType(type);
+            if (name.equals(SAMPLE_WEIGHT_COLUMN_NAME)) {
+                field.setComment("Presto sample weight column");
+                sampled = true;
+            }
+            columns.add(field);
+        }
+
+        HiveStorageFormat hiveStorageFormat = handle.getHiveStorageFormat();
+
+        Database database = databaseDao.getBySourceDatabaseName(connectorId.toString(), handle.getSchemaName());
+
+        Table table = new Table();
+        table.setName(handle.getTableName());
+        table.setDatabase(database);
+
+        Location location = new Location();
+        location.setTable(table);
+        table.setLocation(location);
+        location.setUri(targetPath.toString());
+        Info info = new Info();
+        info.setLocation(location);
+        info.setInputFormat(hiveStorageFormat.getInputFormat());
+        info.setOutputFormat(hiveStorageFormat.getOutputFormat());
+        info.setOwner(handle.getTableOwner());
+        info.setParameters(ImmutableMap.of());
+        info.setSerializationLib(hiveStorageFormat.getSerDe());
+        location.setInfo(info);
+
+        Schema schema = new Schema();
+        schema.setLocation(location);
+        schema.setFields(columns.build());
+        location.setSchema(schema);
+
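+        // Assumption: saving the Table cascades to the newly built Location, Info,
+        // Schema and Field rows; this relies on cascade settings in the JPA model
+        // entities, which are not part of this change.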
tableDao.save(table); + } + + private void rename(Path source, Path target) + { + try { + if (!hdfsEnvironment.getFileSystem(source).rename(source, target)) { + throw new IOException("rename returned false"); + } + } + catch (IOException e) { + throw new PrestoException(HIVE_FILESYSTEM_ERROR, format("Failed to rename %s to %s", source, target), e); + } + } + + @Override + public ConnectorInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle) { + throw new PrestoException(NOT_SUPPORTED, "INSERT not yet supported for S3"); + } + + @Override + public void commitInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, + Collection fragments) { + throw new PrestoException(NOT_SUPPORTED, "INSERT not yet supported for S3"); + } + + @Override + public void createView(ConnectorSession session, SchemaTableName viewName, String viewData, boolean replace) { + throw new PrestoException(NOT_SUPPORTED, "Views not yet supported for S3"); + } + + @Override + public void dropView(ConnectorSession session, SchemaTableName viewName) { + throw new PrestoException(NOT_SUPPORTED, "Views not yet supported for S3"); + } + + @Override + public List listViews(ConnectorSession session, String schemaNameOrNull) { + throw new PrestoException(NOT_SUPPORTED, "Views not yet supported for S3"); + } + + @Override + public Map getViews(ConnectorSession session, SchemaTablePrefix prefix) { + throw new PrestoException(NOT_SUPPORTED, "Views not yet supported for S3"); + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3Module.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3Module.java new file mode 100644 index 000000000..a8a458d99 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3Module.java @@ -0,0 +1,41 @@ +package com.netflix.metacat.s3.connector; + +import com.facebook.presto.hive.NoAccessControl; +import com.facebook.presto.spi.ConnectorMetadata; +import com.facebook.presto.spi.ConnectorSplitManager; +import com.facebook.presto.spi.security.ConnectorAccessControl; +import com.google.inject.Binder; +import com.google.inject.Module; +import com.google.inject.Scopes; +import com.netflix.metacat.s3.connector.dao.DatabaseDao; +import com.netflix.metacat.s3.connector.dao.FieldDao; +import com.netflix.metacat.s3.connector.dao.PartitionDao; +import com.netflix.metacat.s3.connector.dao.SourceDao; +import com.netflix.metacat.s3.connector.dao.TableDao; +import com.netflix.metacat.s3.connector.dao.impl.DatabaseDaoImpl; +import com.netflix.metacat.s3.connector.dao.impl.FieldDaoImpl; +import com.netflix.metacat.s3.connector.dao.impl.PartitionDaoImpl; +import com.netflix.metacat.s3.connector.dao.impl.SourceDaoImpl; +import com.netflix.metacat.s3.connector.dao.impl.TableDaoImpl; +import com.netflix.metacat.s3.connector.util.ConverterUtil; + +/** + * Created by amajumdar on 10/9/15. 
+ */ +public class S3Module implements Module { + + @Override + public void configure(Binder binder) { + binder.bind(ConnectorMetadata.class).to(S3DetailMetadata.class).in(Scopes.SINGLETON); + binder.bind(ConnectorSplitManager.class).to(S3SplitDetailManager.class).in(Scopes.SINGLETON); + + binder.bind(ConnectorAccessControl.class).to(NoAccessControl.class).in(Scopes.SINGLETON); + + binder.bind(ConverterUtil.class).in(Scopes.SINGLETON); + binder.bind(DatabaseDao.class).to(DatabaseDaoImpl.class); + binder.bind(PartitionDao.class).to(PartitionDaoImpl.class); + binder.bind(SourceDao.class).to(SourceDaoImpl.class); + binder.bind(TableDao.class).to(TableDaoImpl.class); + binder.bind(FieldDao.class).to(FieldDaoImpl.class); + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3Plugin.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3Plugin.java new file mode 100644 index 000000000..34af56252 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3Plugin.java @@ -0,0 +1,63 @@ +package com.netflix.metacat.s3.connector; + +import com.facebook.presto.spi.ConnectorFactory; +import com.facebook.presto.spi.Plugin; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import javax.inject.Inject; +import java.util.List; +import java.util.Map; + +import static com.facebook.presto.type.FloatType.FLOAT; +import static com.facebook.presto.type.IntType.INT; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 10/9/15. + */ +public class S3Plugin implements Plugin +{ + private TypeManager typeManager; + private Map optionalConfig = ImmutableMap.of(); + + @Override + public synchronized void setOptionalConfig(Map optionalConfig) + { + this.optionalConfig = ImmutableMap.copyOf(checkNotNull(optionalConfig, "optionalConfig is null")); + } + + @Inject + public synchronized void setTypeManager(TypeManager typeManager) + { + this.typeManager = typeManager; + } + + public synchronized Map getOptionalConfig() + { + return optionalConfig; + } + + @Override + public synchronized List getServices(Class type) + { + if (type == ConnectorFactory.class) { + return ImmutableList.of(type.cast( + new S3ConnectorFactory(typeManager, getOptionalConfig(), getClassLoader()))); + } else if (type == Type.class){ + return ImmutableList.of(type.cast(FLOAT), type.cast(INT)); + } + return ImmutableList.of(); + } + + private static ClassLoader getClassLoader() + { + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + if (classLoader == null) { + classLoader = S3Plugin.class.getClassLoader(); + } + return classLoader; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3SplitDetailManager.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3SplitDetailManager.java new file mode 100644 index 000000000..fc0646c39 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/S3SplitDetailManager.java @@ -0,0 +1,293 @@ +package com.netflix.metacat.s3.connector; + +import com.facebook.presto.hive.HiveConnectorId; +import com.facebook.presto.spi.AuditInfo; +import com.facebook.presto.spi.ColumnHandle; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionDetail; +import 
com.facebook.presto.spi.ConnectorPartitionDetailImpl; +import com.facebook.presto.spi.ConnectorPartitionResult; +import com.facebook.presto.spi.ConnectorSession; +import com.facebook.presto.spi.ConnectorSplitDetailManager; +import com.facebook.presto.spi.ConnectorSplitSource; +import com.facebook.presto.spi.ConnectorTableHandle; +import com.facebook.presto.spi.ConnectorTableLayoutHandle; +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.SavePartitionResult; +import com.facebook.presto.spi.SchemaTableName; +import com.facebook.presto.spi.SchemaTablePartitionName; +import com.facebook.presto.spi.Sort; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.TableNotFoundException; +import com.facebook.presto.spi.TupleDomain; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.inject.persist.Transactional; +import com.netflix.metacat.common.partition.parser.PartitionParser; +import com.netflix.metacat.common.partition.util.FilterPartition; +import com.netflix.metacat.common.partition.util.PartitionUtil; +import com.netflix.metacat.common.partition.visitor.PartitionKeyParserEval; +import com.netflix.metacat.common.partition.visitor.PartitionParamParserEval; +import com.netflix.metacat.s3.connector.dao.PartitionDao; +import com.netflix.metacat.s3.connector.dao.TableDao; +import com.netflix.metacat.s3.connector.model.Info; +import com.netflix.metacat.s3.connector.model.Location; +import com.netflix.metacat.s3.connector.model.Partition; +import com.netflix.metacat.s3.connector.model.Table; +import com.netflix.metacat.s3.connector.util.ConverterUtil; + +import javax.inject.Inject; +import java.io.StringReader; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static com.facebook.presto.hive.HiveUtil.schemaTableName; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by amajumdar on 10/9/15. 
+ */
+@Transactional
+public class S3SplitDetailManager implements ConnectorSplitDetailManager{
+    @Inject
+    TableDao tableDao;
+    @Inject
+    PartitionDao partitionDao;
+    @Inject
+    HiveConnectorId connectorId;
+    @Inject
+    ConverterUtil converterUtil;
+    private static final String FIELD_DATE_CREATED = "dateCreated";
+    private static final String FIELD_BATCHID = "batchid";
+
+    @Override
+    public ConnectorPartitionResult getPartitions(ConnectorTableHandle table, String filterExpression,
+            List<String> partitionIds, Sort sort, Pageable pageable, boolean includePartitionDetails) {
+        SchemaTableName tableName = schemaTableName(table);
+        return new ConnectorPartitionResult( _getPartitions(tableName, filterExpression, partitionIds, sort, pageable, includePartitionDetails)
+                , TupleDomain.none());
+    }
+
+    private List<ConnectorPartition> _getPartitions(SchemaTableName tableName, final String filterExpression
+            , List<String> partitionIds, Sort sort, Pageable pageable, boolean includePartitionDetails) {
+        //
+        // Limiting the IN clause to 5000 partition names because the sql query with the IN clause for part_name(767 bytes)
+        // will hit the max sql query length(max_allowed_packet for our RDS) if we use more than 5400 or so
+        //
+        final List<ConnectorPartition> partitions = Lists.newArrayList();
+        if( partitionIds != null && partitionIds.size() > 5000){
+            List<List<String>> subFilterPartitionNamesList = Lists.partition( partitionIds, 5000);
+            subFilterPartitionNamesList.forEach(
+                    subPartitionIds -> partitions.addAll( _getConnectorPartitions(tableName, filterExpression,
+                            subPartitionIds, sort, pageable, includePartitionDetails)));
+        } else {
+            partitions.addAll(_getConnectorPartitions(tableName, filterExpression, partitionIds, sort, pageable, includePartitionDetails));
+        }
+        return partitions;
+    }
+
+    private List<ConnectorPartition> _getConnectorPartitions(SchemaTableName tableName, final String filterExpression
+            , List<String> partitionIds, Sort sort, Pageable pageable, boolean includePartitionDetails) {
+        // batch exists
+        boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
+        // Support for dateCreated
+        boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
+        String dateCreatedSqlCriteria = null;
+        if( hasDateCreated){
+            dateCreatedSqlCriteria = getDateCreatedSqlCriteria( filterExpression);
+        }
+        // Table
+        Table table = tableDao.getBySourceDatabaseTableName(connectorId.toString(), tableName.getSchemaName(), tableName.getTableName());
+        if ( table == null) {
+            throw new TableNotFoundException(tableName);
+        }
+        Collection<String> singlePartitionExprs = getSinglePartitionExprs( filterExpression);
+        List<Partition> partitions = partitionDao.getPartitions(table.getId(), partitionIds, singlePartitionExprs, dateCreatedSqlCriteria, sort, Strings.isNullOrEmpty(filterExpression)?pageable:null);
+        FilterPartition filter = new FilterPartition();
+        // a single dummy fixed-value binding; the map type assumes the
+        // Map<ColumnHandle, Comparable<?>> shape expected by TupleDomain.withFixedValues
+        Map<ColumnHandle, Comparable<?>> domainMap = Maps.newHashMapWithExpectedSize(1);
+        domainMap.put(new ColumnHandle(){}, "ignore");
+        TupleDomain tupleDomain = TupleDomain.withFixedValues(domainMap);
+
+        List<ConnectorPartition> result = partitions.stream().filter(partition -> {
+            Map<String, String> values = null;
+            if( hasDateCreated){
+                values = Maps.newHashMap();
+                values.put(FIELD_DATE_CREATED, (partition.getCreatedDate().getTime() / 1000) + "");
+            }
+            return Strings.isNullOrEmpty(filterExpression)
+                    || filter.evaluatePartitionExpression( filterExpression, partition.getName(), partition.getUri(), isBatched, values);
+        }).map( partition -> {
+            StorageInfo storageInfo = new StorageInfo();
+            Location location = table.getLocation();
+            if( location != null){
+                Info info = location.getInfo();
+                if( info != null){
+                    storageInfo.setInputFormat(info.getInputFormat());
+                    storageInfo.setOutputFormat(info.getOutputFormat());
+                    storageInfo.setSerializationLib(info.getSerializationLib());
+                    if( includePartitionDetails){
+                        storageInfo.setParameters(Maps.newHashMap(info.getParameters()));
+                    }
+                }
+            }
+            storageInfo.setUri(partition.getUri());
+            AuditInfo auditInfo = new AuditInfo();
+            Date createdDate = partition.getCreatedDate();
+            if( createdDate != null) {
+                auditInfo.setCreatedDate( createdDate.getTime()/1000);
+            }
+            Date lastUpdatedDate = partition.getLastUpdatedDate();
+            if( lastUpdatedDate != null) {
+                auditInfo.setLastUpdatedDate( lastUpdatedDate.getTime()/1000);
+            }
+            return new ConnectorPartitionDetailImpl(partition.getName(), tupleDomain, storageInfo, null, auditInfo);
+        }).collect(Collectors.toList());
+        //
+        if( pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)){
+            int limit = pageable.getLimit();
+            if( result.size() < limit){
+                limit = result.size();
+            }
+            result = result.subList(pageable.getOffset(), limit);
+        }
+        return result;
+    }
+
+    private String getDateCreatedSqlCriteria(String filterExpression) {
+        StringBuilder result = new StringBuilder();
+        Collection<String> values = Lists.newArrayList();
+        if( !Strings.isNullOrEmpty(filterExpression)) {
+            try {
+                values = (Collection<String>) new PartitionParser(new StringReader(filterExpression)).filter()
+                        .jjtAccept(new PartitionParamParserEval(), null);
+            } catch (Throwable ignored) {
+                //
+            }
+        }
+        for(String value : values){
+            if( result.length() != 0){
+                result.append(" and ");
+            }
+            result.append(value.replace("dateCreated", "to_seconds(p.date_created)"));
+        }
+        return result.toString();
+    }
+
+    private Collection<String> getSinglePartitionExprs(String filterExpression) {
+        Collection<String> result = Lists.newArrayList();
+        if( !Strings.isNullOrEmpty(filterExpression)) {
+            try {
+                result = (Collection<String>) new PartitionParser(new StringReader(filterExpression)).filter()
+                        .jjtAccept(new PartitionKeyParserEval(), null);
+            } catch (Throwable ignored) {
+                //
+            }
+        }
+        if( result != null) {
+            result = result.stream().filter(s -> !(s.startsWith("batchid=") || s.startsWith("dateCreated="))).collect(
+                    Collectors.toList());
+        }
+        return result;
+    }
+
+    @Override
+    @Transactional
+    public SavePartitionResult savePartitions(ConnectorTableHandle tableHandle, List<ConnectorPartition> partitions
+            , List<String> partitionIdsForDeletes, boolean checkIfExists) {
+        checkNotNull(tableHandle, "tableHandle is null");
+        SavePartitionResult result = new SavePartitionResult();
+        SchemaTableName tableName = schemaTableName(tableHandle);
+        // Table
+        Table table = tableDao.getBySourceDatabaseTableName(connectorId.toString(), tableName.getSchemaName(),
+                tableName.getTableName());
+        if ( table == null) {
+            throw new TableNotFoundException(tableName);
+        }
+
+        // New partition ids
+        List<String> addedPartitionIds = Lists.newArrayList();
+        // Updated partition ids
+        List<String> existingPartitionIds = Lists.newArrayList();
+        //
+        Map<String, Partition> existingPartitionMap = Maps.newHashMap();
+
+        if( checkIfExists) {
+            List<String> partitionNames = partitions.stream().map(
+                    partition -> {
+                        String partitionName = partition.getPartitionId();
+                        PartitionUtil.validatePartitionName(partitionName, converterUtil.partitionKeys(table));
+                        return partitionName;
+                    }).collect(Collectors.toList());
+            existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
+        }
+
+        // New partitions
+        List<Partition> s3Partitions = Lists.newArrayList();
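+        // Upsert semantics: with checkIfExists, ids missing from existingPartitionMap
+        // are inserted as new, while existing partitions are rewritten only when the
+        // uri changed; without it, every incoming partition is treated as new.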
+        for(ConnectorPartition partition:partitions){
+            String partitionName = partition.getPartitionId();
+            Partition s3Partition = existingPartitionMap.get(partitionName);
+            if(s3Partition == null){
+                addedPartitionIds.add(partitionName);
+                s3Partitions.add(converterUtil.toPartition(table, partition));
+            } else {
+                ConnectorPartitionDetail partitionDetail = (ConnectorPartitionDetail) partition;
+                String partitionUri = converterUtil.getUri(partitionDetail);
+                String s3PartitionUri = s3Partition.getUri();
+                if( partitionUri != null && !partitionUri.equals( s3PartitionUri)){
+                    s3Partition.setUri(partitionUri);
+                    existingPartitionIds.add(partitionName);
+                    s3Partitions.add(s3Partition);
+                }
+            }
+        }
+        partitionDao.save(s3Partitions);
+        if( partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
+            partitionDao.deleteByNames(connectorId.toString(), tableName.getSchemaName(),
+                    tableName.getTableName(), partitionIdsForDeletes);
+        }
+
+        result.setAdded( addedPartitionIds);
+        result.setUpdated( existingPartitionIds);
+        return result;
+    }
+
+    private Map<String, Partition> getPartitionsByNames(Long tableId,
+            List<String> partitionNames) {
+        List<Partition> partitions = partitionDao.getPartitions( tableId, partitionNames, null, null, null, null);
+        return partitions.stream().collect(Collectors.toMap(Partition::getName, partition -> partition));
+    }
+
+    @Override
+    @Transactional
+    public void deletePartitions(ConnectorTableHandle tableHandle, List<String> partitionIds) {
+        SchemaTableName schemaTableName = schemaTableName(tableHandle);
+        partitionDao.deleteByNames(connectorId.toString(), schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionIds);
+    }
+
+    @Override
+    public Integer getPartitionCount(ConnectorTableHandle tableHandle) {
+        SchemaTableName schemaTableName = schemaTableName(tableHandle);
+        return partitionDao.count(connectorId.toString(), schemaTableName.getSchemaName(),
+                schemaTableName.getTableName());
+    }
+
+    @Override
+    public List<SchemaTablePartitionName> getPartitionNames(String uri, boolean prefixSearch) {
+        List<Partition> partitions = partitionDao.getByUri( uri, prefixSearch);
+        return partitions.stream().map(partition ->
+                new SchemaTablePartitionName(
+                        new SchemaTableName(partition.getTable().getDatabase().getName(), partition.getTable().getName()), partition.getName()))
+                .collect(Collectors.toList());
+    }
+
+
+    @Override
+    public ConnectorSplitSource getSplits(ConnectorSession session, ConnectorTableLayoutHandle layout) {
+        return null;
+    }
+}
diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/BaseDao.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/BaseDao.java
new file mode 100644
index 000000000..3ad1d373e
--- /dev/null
+++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/BaseDao.java
@@ -0,0 +1,108 @@
+package com.netflix.metacat.s3.connector.dao;
+
+import java.util.List;
+
+/**
+ * The base dao.
+ *
+ * @param <T> the entity type
+ */
+public interface BaseDao<T> {
+
+    /**
+     * Save the entity to the data store.
+     * @param entity the entity to save.
+     */
+    public T save( T entity);
+
+    /**
+     * Save the entity and refresh the entity from
+     * the database if required.
+     *
+     * @param entity the entity to be saved and refreshed.
+     *
+     * @param isRefreshRequired {@code true} to perform a refresh from the store.
+     */
+    public T save(T entity, boolean isRefreshRequired);
+
+    /**
+     * Saves all given entities.
+     *
+     * @param entities the entities to save
+     * @return the saved entities
+     * @throws IllegalArgumentException in case the given entity is {@literal null}.
+     */
+    public List<T> save( Iterable<T> entities);
+
+    /**
+     * Delete the entity with the given id.
+     * @param id the id of the entity.
+     */
+    public void deleteById(Long id);
+    /**
+     * Delete the entities for the given ids.
+     * @param ids list of ids.
+     */
+    public void deleteById(Iterable<Long> ids);
+    /**
+     * Delete the given entity.
+     * @param entity the entity to delete.
+     */
+    public void delete(T entity);
+    /**
+     * Delete the given entities.
+     * @param entities the entities to delete.
+     */
+    public void delete( Iterable<T> entities);
+    /**
+     * Deletes all entities managed by the repository.
+     */
+    public void deleteAll();
+    /**
+     * Returns whether an entity with the given id exists.
+     *
+     * @param id must not be {@literal null}.
+     * @return true if an entity with the given id exists, {@literal false} otherwise
+     * @throws IllegalArgumentException if {@code id} is {@literal null}
+     */
+    public boolean isExists(Long id);
+
+    /**
+     * Returns an entity for the given id.
+     * @param id id of the entity
+     * @return the entity
+     */
+    public T get(Long id);
+    /**
+     * Returns an entity for the given name.
+     * @param name name of the entity
+     * @return the entity
+     */
+    public T getByName(String name);
+    /**
+     * Returns a list of entities for the given names.
+     * @param names names of the entities
+     * @return the list of entities
+     */
+    public List<T> getByNames(List<String> names);
+    /**
+     * Returns the entities for the given ids.
+     * @param ids list of ids
+     * @return the list of entities
+     */
+    public List<T> get(Iterable<Long> ids);
+    /**
+     * Returns all the instances.
+     * @return Returns all the instances
+     */
+    public List<T> getAll();
+
+    /**
+     * Returns the number of entities available.
+     *
+     * @return the number of entities
+     */
+    long count();
+
+}
+
diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/DatabaseDao.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/DatabaseDao.java
new file mode 100644
index 000000000..9827cadb5
--- /dev/null
+++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/DatabaseDao.java
@@ -0,0 +1,13 @@
+package com.netflix.metacat.s3.connector.dao;
+
+import com.netflix.metacat.s3.connector.model.Database;
+
+import java.util.List;
+
+/**
+ * Created by amajumdar on 1/2/15.
+ */
+public interface DatabaseDao extends BaseDao<Database> {
+    public Database getBySourceDatabaseName(String sourceName, String databaseName);
+    public List<Database> getBySourceDatabaseNames(String sourceName, List<String> databaseNames);
+}
diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/FieldDao.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/FieldDao.java
new file mode 100644
index 000000000..db2d703cc
--- /dev/null
+++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/FieldDao.java
@@ -0,0 +1,9 @@
+package com.netflix.metacat.s3.connector.dao;
+
+import com.netflix.metacat.s3.connector.model.Field;
+
+/**
+ * Created by amajumdar on 1/2/15.
+ */
+public interface FieldDao extends BaseDao<Field> {
+}
\ No newline at end of file
diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/PartitionDao.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/PartitionDao.java
new file mode 100644
index 000000000..9a64f23f6
--- /dev/null
+++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/PartitionDao.java
@@ -0,0 +1,44 @@
+package com.netflix.metacat.s3.connector.dao;
+
+/**
+ * Created by amajumdar on 10/10/15.
+ */ + +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.Sort; +import com.netflix.metacat.s3.connector.model.Partition; + +import java.util.List; + +/** + * Created by amajumdar on 1/2/15. + */ +public interface PartitionDao extends BaseDao { + List getPartitions( Long tableId, List partitionIds, Iterable partitionParts, String dateCreatedSqlCriteria, Sort sort, Pageable pageable); + + /** + * Deletes the partitions for the given table and list of partition ids + * @param sourceName catalog/source name + * @param databaseName schema/database name + * @param tableName table name + * @param partitionIds list of partition ids + */ + void deleteByNames(String sourceName, String databaseName, String tableName, List partitionIds); + + /** + * Returns the number of partitions for the given table. + * @param sourceName catalog/source name + * @param databaseName schema/database name + * @param tableName table name + * @return number of partitions + */ + Integer count(String sourceName, String databaseName, String tableName); + + /** + * Returns the list of partitions with the given uri. + * @param uri uri path + * @param prefixSearch true, if the given uri is partial + * @return list of partitions + */ + List getByUri(String uri, boolean prefixSearch); +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/SourceDao.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/SourceDao.java new file mode 100644 index 000000000..aeb72ae58 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/SourceDao.java @@ -0,0 +1,10 @@ +package com.netflix.metacat.s3.connector.dao; + +import com.netflix.metacat.s3.connector.model.Source; + +/** + * Created by amajumdar on 1/2/15. + */ +public interface SourceDao extends BaseDao { + Source getByName(String name, boolean fromCache); +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/TableDao.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/TableDao.java new file mode 100644 index 000000000..9bbded7df --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/TableDao.java @@ -0,0 +1,13 @@ +package com.netflix.metacat.s3.connector.dao; + +import com.netflix.metacat.s3.connector.model.Table; + +import java.util.List; + +/** + * Created by amajumdar on 1/2/15. + */ +public interface TableDao extends BaseDao
<Table> { + public Table getBySourceDatabaseTableName(String sourceName, String schemaName, String tableName); + public List<Table>
getBySourceDatabaseTableNames(String sourceName, String schemaName, List tableNames); +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/BaseDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/BaseDaoImpl.java new file mode 100644 index 000000000..be0b0bad6 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/BaseDaoImpl.java @@ -0,0 +1,152 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.netflix.metacat.s3.connector.dao.BaseDao; + +import javax.inject.Provider; +import javax.persistence.EntityManager; +import javax.persistence.TypedQuery; +import java.util.List; + +/** + * Created by amajumdar on 12/30/14. + */ +public abstract class BaseDaoImpl implements BaseDao { + Provider em; + private static final String SQL_GET_BY_NAME = "select a from %s a where name=:name"; + private static final String SQL_GET_BY_NAMES = "select a from %s a where name in (:names)"; + + protected BaseDaoImpl(Provider em) { + this.em = em; + } + + protected abstract Class getEntityClass(); + + @Override + public T save(T entity) { + return save(entity, false); + } + + protected abstract boolean isNew(T entity); + + @Override + public T save(T entity, boolean flush) { + T result = null; + EntityManager entityManager = em.get(); + if (isNew(entity)) { + entityManager.persist(entity); + result = entity; + } else { + result = entityManager.merge(entity); + } + if (flush) { + entityManager.flush(); + } + + return result; + } + + @Override + public List save(Iterable entities) { + List result = Lists.newArrayList(); + + if (entities != null) { + for (T entity : entities) { + result.add(save(entity)); + } + } + + return result; + } + + @Override + public void deleteById(Long id) { + Preconditions.checkArgument(id!=null, "Id cannot be null."); + T entity = get(id); + if( entity != null){ + delete( entity); + } + } + + @Override + public void deleteById(Iterable ids) { + Preconditions.checkArgument(ids!=null, "Ids cannot be null."); + for(Long id: ids){ + deleteById(id); + } + } + + @Override + public void delete(T entity) { + Preconditions.checkArgument(entity!=null, "Entity cannot be null."); + EntityManager entityManager = em.get(); + entityManager.remove(entity); + } + + @Override + public void delete(Iterable entities) { + Preconditions.checkArgument(entities!=null, "Entities cannot be null."); + for(T entity: entities){ + delete(entity); + } + } + + @Override + public void deleteAll() { + em.get().createQuery("delete from " + getEntityClass().getName()).executeUpdate(); + } + + @Override + public boolean isExists(Long id) { + return get(id)!=null; + } + + @Override + public T get(Long id) { + Preconditions.checkArgument(id!=null, "Id cannot be null."); + return em.get().find(getEntityClass(), id); + } + + @Override + public T getByName(String name) { + T result = null; + Preconditions.checkArgument(name!=null, "Name cannot be null."); + TypedQuery query = em.get().createQuery( String.format(SQL_GET_BY_NAME,getEntityClass().getName()), getEntityClass()); + query.setParameter("name", name); + try { + result = query.getSingleResult(); + } catch(Exception ignored){} + return result; + } + + public List getByNames(List names){ + List result = Lists.newArrayList(); + if( names != null && !names.isEmpty()){ + TypedQuery query = 
em.get().createQuery(String.format(SQL_GET_BY_NAMES, getEntityClass().getName()), getEntityClass()); + query.setParameter("names", names); + result = query.getResultList(); + } + return result; + } + + @Override + public List get(Iterable ids) { + List result = Lists.newArrayList(); + for(Long id: ids){ + result.add(get(id)); + } + return result; + } + + @Override + @SuppressWarnings("unchecked") + public List getAll() { + return em.get().createQuery( "select a from " + getEntityClass().getName() + " a").getResultList(); + } + + @Override + public long count() { + return (long) em.get().createQuery("select count(a) from " + getEntityClass().getName() + " a").getSingleResult(); + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/DatabaseDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/DatabaseDaoImpl.java new file mode 100644 index 000000000..28f6fc9f3 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/DatabaseDaoImpl.java @@ -0,0 +1,45 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.google.common.collect.Lists; +import com.netflix.metacat.s3.connector.dao.DatabaseDao; +import com.netflix.metacat.s3.connector.model.Database; + +import javax.inject.Inject; +import javax.inject.Provider; +import javax.persistence.EntityManager; +import javax.persistence.TypedQuery; +import java.util.List; + +/** + * Created by amajumdar on 1/2/15. + */ +public class DatabaseDaoImpl extends IdEntityDaoImpl implements DatabaseDao { + @Inject + public DatabaseDaoImpl(Provider em) { + super(em); + } + + @Override + protected Class getEntityClass() { + return Database.class; + } + + @Override + public Database getBySourceDatabaseName(String sourceName, String databaseName) { + Database result = null; + List databases = getBySourceDatabaseNames(sourceName, Lists.newArrayList(databaseName)); + if( !databases.isEmpty()){ + result = databases.get(0); + } + return result; + } + + @Override + public List getBySourceDatabaseNames(String sourceName, List databaseNames) { + TypedQuery query = em.get().createNamedQuery(Database.NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES, + Database.class); + query.setParameter("sourceName", sourceName); + query.setParameter("databaseNames", databaseNames); + return query.getResultList(); + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/FieldDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/FieldDaoImpl.java new file mode 100644 index 000000000..403449c81 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/FieldDaoImpl.java @@ -0,0 +1,23 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.netflix.metacat.s3.connector.dao.FieldDao; +import com.netflix.metacat.s3.connector.model.Field; + +import javax.inject.Inject; +import javax.inject.Provider; +import javax.persistence.EntityManager; + +/** + * Created by amajumdar on 1/2/15. 
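+ * <p>Supplies only the entity class; all CRUD behavior is inherited from
+ * {@code BaseDaoImpl} via {@code IdEntityDaoImpl}.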
+ */ +public class FieldDaoImpl extends IdEntityDaoImpl implements FieldDao { + @Inject + public FieldDaoImpl(Provider em) { + super(em); + } + + @Override + protected Class getEntityClass() { + return Field.class; + } +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/IdEntityDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/IdEntityDaoImpl.java new file mode 100644 index 000000000..3c4056710 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/IdEntityDaoImpl.java @@ -0,0 +1,38 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.google.common.collect.Lists; +import com.netflix.metacat.s3.connector.dao.BaseDao; +import com.netflix.metacat.s3.connector.model.IdEntity; + +import javax.inject.Provider; +import javax.persistence.EntityManager; +import javax.persistence.criteria.CriteriaBuilder; +import javax.persistence.criteria.CriteriaQuery; +import javax.persistence.criteria.Root; +import java.util.List; + +/** + * Created by amajumdar on 1/2/15. + */ +public abstract class IdEntityDaoImpl extends BaseDaoImpl implements + BaseDao { + + protected IdEntityDaoImpl(Provider em) { + super(em); + } + + @Override + public List get(Iterable ids) { + EntityManager entityManager = em.get(); + CriteriaBuilder cb = entityManager.getCriteriaBuilder(); + CriteriaQuery criteriaQuery = cb.createQuery(getEntityClass()); + Root root = criteriaQuery.from(getEntityClass()); + criteriaQuery.where(root.get("id").in(Lists.newArrayList(ids))); + return entityManager.createQuery(criteriaQuery).getResultList(); + } + + @Override + protected boolean isNew(T entity) { + return entity.getId()==null; + } +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/PartitionDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/PartitionDaoImpl.java new file mode 100644 index 000000000..d05ef90f6 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/PartitionDaoImpl.java @@ -0,0 +1,93 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.facebook.presto.spi.Pageable; +import com.facebook.presto.spi.Sort; +import com.google.common.base.Joiner; +import com.google.common.base.Strings; +import com.netflix.metacat.s3.connector.dao.PartitionDao; +import com.netflix.metacat.s3.connector.model.Partition; + +import javax.inject.Inject; +import javax.inject.Provider; +import javax.persistence.EntityManager; +import javax.persistence.Query; +import javax.persistence.TypedQuery; +import java.util.List; + +/** + * Created by amajumdar on 1/2/15. 
+ */ +public class PartitionDaoImpl extends IdEntityDaoImpl implements PartitionDao { + private static final String SQL_GET_PARTITIONS = "select * from partition_table as p where p.table_id=:tableId"; + + @Inject + public PartitionDaoImpl(Provider em) { + super(em); + } + + @Override + protected Class getEntityClass() { + return Partition.class; + } + + public List getPartitions( Long tableId, List partitionIds, Iterable partitionParts, String dateCreatedSqlCriteria, Sort sort, Pageable pageable){ + // Create the sql + StringBuilder queryBuilder = new StringBuilder(SQL_GET_PARTITIONS); + if( partitionIds != null && !partitionIds.isEmpty()){ + queryBuilder.append(" and p.name in ('") + .append(Joiner.on("','").skipNulls().join(partitionIds)) + .append("')"); + } + if( partitionParts != null) { + for (String singlePartitionExpr : partitionParts) { + queryBuilder.append(" and p.name like '%").append(singlePartitionExpr).append("%'"); + } + } + if( !Strings.isNullOrEmpty(dateCreatedSqlCriteria)){ + queryBuilder.append(" and ").append(dateCreatedSqlCriteria); + } + if( sort != null && sort.hasSort()){ + queryBuilder.append(" order by ").append( sort.getSortBy()).append(" ").append(sort.getOrder().name()); + } + if( pageable != null && pageable.isPageable()){ + queryBuilder.append(" limit ").append( pageable.getOffset()).append(',').append(pageable.getLimit()); + } + // entityManager + EntityManager entityManager = em.get(); + Query pQuery = entityManager.createNativeQuery(queryBuilder.toString(), Partition.class); + pQuery.setParameter("tableId", tableId); + return pQuery.getResultList(); + } + + @Override + public void deleteByNames(String sourceName, String databaseName, String tableName, List partitionNames) { + Query query = em.get().createNamedQuery(Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES); + query.setParameter("sourceName", sourceName); + query.setParameter("databaseName", databaseName); + query.setParameter("tableName", tableName); + query.setParameter("partitionNames", partitionNames); + query.executeUpdate(); + } + + @Override + public Integer count(String sourceName, String databaseName, String tableName) { + TypedQuery query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_COUNT_FOR_TABLE, Integer.class); + query.setParameter("sourceName", sourceName); + query.setParameter("databaseName", databaseName); + query.setParameter("tableName", tableName); + return query.getSingleResult(); + } + + @Override + public List getByUri(String uri, boolean prefixSearch) { + TypedQuery query = null; + if( prefixSearch){ + query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_BY_URI_PREFIX, Partition.class); + query.setParameter("uri", uri + "%"); + } else { + query = em.get().createNamedQuery(Partition.NAME_QUERY_GET_BY_URI, Partition.class); + query.setParameter("uri", uri); + } + return query.getResultList(); + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/SourceDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/SourceDaoImpl.java new file mode 100644 index 000000000..24c53b23c --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/SourceDaoImpl.java @@ -0,0 +1,61 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.facebook.presto.exception.CatalogNotFoundException; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import 
com.netflix.metacat.s3.connector.dao.SourceDao; +import com.netflix.metacat.s3.connector.model.Source; + +import javax.inject.Inject; +import javax.inject.Provider; +import javax.persistence.EntityManager; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +/** + * Created by amajumdar on 1/2/15. + */ +public class SourceDaoImpl extends IdEntityDaoImpl implements SourceDao { + @Inject + public SourceDaoImpl(Provider em) { + super(em); + } + + LoadingCache sourceCache = CacheBuilder.newBuilder().expireAfterWrite( 120, TimeUnit.MINUTES).build( + new CacheLoader () { + @Override + public Source load(String name) throws Exception { + return loadSource(name); + } + }); + + @Override + protected Class getEntityClass() { + return Source.class; + } + + private Source loadSource(String name){ + return super.getByName(name); + } + + public Source getByName(String name){ + Source result = null; + try { + result = sourceCache.get(name); + } catch (ExecutionException ignored) { + // + } + if( result == null){ + throw new CatalogNotFoundException(name); + } + return result; + } + + public Source getByName(String name, boolean fromCache){ + if(!fromCache){ + sourceCache.invalidate(name); + } + return getByName(name); + } +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/TableDaoImpl.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/TableDaoImpl.java new file mode 100644 index 000000000..ed258b14a --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/dao/impl/TableDaoImpl.java @@ -0,0 +1,46 @@ +package com.netflix.metacat.s3.connector.dao.impl; + +import com.google.common.collect.Lists; +import com.netflix.metacat.s3.connector.dao.TableDao; +import com.netflix.metacat.s3.connector.model.Table; + +import javax.inject.Inject; +import javax.inject.Provider; +import javax.persistence.EntityManager; +import javax.persistence.TypedQuery; +import java.util.List; + +/** + * Created by amajumdar on 1/2/15. + */ +public class TableDaoImpl extends IdEntityDaoImpl
<Table> implements TableDao { + @Inject + public TableDaoImpl(Provider<EntityManager> em) { + super(em); + } + + @Override + protected Class<Table>
getEntityClass() { + return Table.class; + } + + @Override + public Table getBySourceDatabaseTableName(String sourceName, String databaseName, String tableName) { + Table result = null; + List<Table>
tables = getBySourceDatabaseTableNames(sourceName, databaseName, Lists.newArrayList(tableName)); + if( !tables.isEmpty()){ + result = tables.get(0); + } + return result; + } + + @Override + public List<Table>
getBySourceDatabaseTableNames(String sourceName, String databaseName, List<String> tableNames) { + TypedQuery<Table>
query = em.get().createNamedQuery(Table.NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES, + Table.class); + query.setParameter("sourceName", sourceName); + query.setParameter("databaseName", databaseName); + query.setParameter("tableNames", tableNames); + return query.getResultList(); + } +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/BaseEntity.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/BaseEntity.java new file mode 100644 index 000000000..74493c6cb --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/BaseEntity.java @@ -0,0 +1,89 @@ +package com.netflix.metacat.s3.connector.model; + +import org.joda.time.Instant; + +import javax.persistence.Column; +import javax.persistence.MappedSuperclass; +import javax.persistence.PrePersist; +import javax.persistence.PreUpdate; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.Date; + +/** + * {@code BaseEntity} is the entity that all entities. + */ +@MappedSuperclass +public class BaseEntity { + + /** The date of creation */ + protected Date createdDate; + + /** The last updated date. */ + protected Date lastUpdatedDate; + + /** + * Get the date and time of the entity creation. + * + * @return + * The date and time of the creation + */ + @Column(name="date_created", insertable = true, updatable = false, nullable = false) + public Date getCreatedDate() { + return createdDate; + } + + /** + * Set the date and time of the creation. + * + * @param createdDate + * The date and time of the creation + */ + public void setCreatedDate(Date createdDate) { + this.createdDate = createdDate; + } + + public void setCreatedDate(Timestamp createdDate) { + this.createdDate = createdDate; + } + + /** + * Get the date and time of the last update. + * + * @return + * Get the date and time of the last update. + */ + @Column(name = "last_updated", insertable = true, updatable = true, nullable = false) + public Date getLastUpdatedDate() { + return lastUpdatedDate; + } + + /** + * Set the date and time of the last update. + * + * @param lastUpdatedDate + * The date and time of the last update + */ + public void setLastUpdatedDate(Date lastUpdatedDate) { + this.lastUpdatedDate = lastUpdatedDate; + } + + public void setLastUpdatedDate(Timestamp lastUpdatedDate) { + this.lastUpdatedDate = lastUpdatedDate; + } + + @PrePersist + public void onInsert() { + setCreatedDate(Calendar.getInstance().getTime()); + setLastUpdatedDate(Instant.now().toDate()); + } + + @PreUpdate + void onUpdate() { + setLastUpdatedDate(Instant.now().toDate()); + } + /** + * Validate the entity for valid values. + */ + public void validate() {} +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/BaseTable.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/BaseTable.java new file mode 100644 index 000000000..f0eaa41f9 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/BaseTable.java @@ -0,0 +1,35 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CascadeType; +import javax.persistence.Column; +import javax.persistence.FetchType; +import javax.persistence.MappedSuperclass; +import javax.persistence.OneToMany; +import java.util.List; + +/** + * Created by amajumdar on 12/23/14. 
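+ * <p>Mapped superclass for table-like entities: a required {@code name}
+ * column plus a lazily fetched list of partitions mapped by "table".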
+ */ +@MappedSuperclass +public abstract class BaseTable extends IdEntity{ + private String name; + private List partitions; + + @Column(name = "name", nullable = false) + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @OneToMany(cascade= CascadeType.ALL, fetch= FetchType.LAZY, mappedBy = "table") + public List getPartitions() { + return partitions; + } + + public void setPartitions(List partitions) { + this.partitions = partitions; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Database.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Database.java new file mode 100644 index 000000000..495b79c75 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Database.java @@ -0,0 +1,60 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CascadeType; +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.Index; +import javax.persistence.JoinColumn; +import javax.persistence.ManyToOne; +import javax.persistence.NamedQueries; +import javax.persistence.NamedQuery; +import javax.persistence.OneToMany; +import javax.persistence.UniqueConstraint; +import java.util.List; + +/** + * Created by amajumdar on 12/19/14. + */ +@Entity +@javax.persistence.Table(name="database_object", + indexes = @Index(name="database_object_i1", columnList = "name"), + uniqueConstraints= @UniqueConstraint(name="database_object_u1", columnNames = {"source_id", "name"})) +@NamedQueries({ + @NamedQuery( + name = Database.NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES, + query = "select d from Database d where d.source.name=:sourceName and d.name in (:databaseNames)" + ) +}) +public class Database extends IdEntity{ + public static final String NAME_QUERY_GET_BY_SOURCE_DATABASE_NAMES = "getBySourceDatabaseNames"; + private String name; + private List
<Table> tables; + private Source source; + + @Column(name = "name", nullable = false) + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + @OneToMany(cascade=CascadeType.ALL, fetch= FetchType.LAZY, mappedBy = "database") + public List<Table>
getTables() { + return tables; + } + + public void setTables(List<Table>
tables) { + this.tables = tables; + } + @ManyToOne(fetch = FetchType.LAZY, optional=false) + @JoinColumn(name = "source_id", nullable = false) + public Source getSource() { + return source; + } + + public void setSource(Source source) { + this.source = source; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Field.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Field.java new file mode 100644 index 000000000..63034d06b --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Field.java @@ -0,0 +1,88 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.JoinColumn; +import javax.persistence.ManyToOne; +import javax.persistence.UniqueConstraint; + +/** + * Created by amajumdar on 12/23/14. + */ +@Entity +@javax.persistence.Table(name="field", + uniqueConstraints= @UniqueConstraint(name="field_u1",columnNames = {"schema_id", "name", "pos"})) +public class Field extends IdEntity{ + private int pos; + private String name; + private String type; + private String sourceType; + private String comment; + private boolean partitionKey; + private Schema schema; + + @Column(name="pos", nullable = false) + public int getPos() { + return pos; + } + + public void setPos(int pos) { + this.pos = pos; + } + + @Column(name="name", nullable = false) + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Column(name="type", nullable = false, length=4000) + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + @Column(name="source_type", nullable = true) + public String getSourceType() { + return sourceType; + } + + public void setSourceType(String sourceType) { + this.sourceType = sourceType; + } + + @Column(name="comment", nullable = true) + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Column(name="partition_key", nullable = false) + public boolean isPartitionKey() { + return partitionKey; + } + + public void setPartitionKey(boolean partitionKey) { + this.partitionKey = partitionKey; + } + + @ManyToOne(fetch = FetchType.LAZY, optional=false) + @JoinColumn(name = "schema_id", nullable = false) + public Schema getSchema() { + return schema; + } + + public void setSchema(Schema schema) { + this.schema = schema; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/IdEntity.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/IdEntity.java new file mode 100644 index 000000000..999cde26e --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/IdEntity.java @@ -0,0 +1,38 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.Column; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.MappedSuperclass; +import javax.persistence.Version; + +/** + * Created by amajumdar on 12/21/14. 
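+ *
+ * <p>Adds a generated surrogate id and a {@code @Version} column for
+ * optimistic locking; {@code IdEntityDaoImpl#isNew} treats a null id as a
+ * new entity. Sketch (values hypothetical, mirroring the test specs):
+ * <pre>{@code
+ * Source source = new Source();   // id == null, so save() will persist
+ * source.setName("s3");
+ * source.setType("s3");
+ * sourceDao.save(source, true);   // flush; id and version now assigned
+ * }</pre>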
+ */ +@MappedSuperclass +public class IdEntity extends BaseEntity{ + private Long id; + private Long version; + + @Id + @GeneratedValue(strategy= GenerationType.IDENTITY) + @Column(name="id", unique = true, nullable = false) + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + @Version + @Column(name="version", nullable = false) + public Long getVersion() { + return version; + } + + public void setVersion(Long version) { + this.version = version; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Info.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Info.java new file mode 100644 index 000000000..36e85ec49 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Info.java @@ -0,0 +1,84 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CollectionTable; +import javax.persistence.Column; +import javax.persistence.ElementCollection; +import javax.persistence.Entity; +import javax.persistence.JoinColumn; +import javax.persistence.MapKeyColumn; +import javax.persistence.OneToOne; +import javax.persistence.UniqueConstraint; +import java.util.Map; + +/** + * Created by amajumdar on 12/22/14. + */ +@Entity +@javax.persistence.Table(name="info", + uniqueConstraints= @UniqueConstraint(name="info_u1",columnNames = "location_id")) +public class Info extends IdEntity{ + private String inputFormat; + private String outputFormat; + private String serializationLib; + private String owner; + private Map parameters; + private Location location; + + @Column(name="input_format") + public String getInputFormat() { + return inputFormat; + } + + public void setInputFormat(String inputFormat) { + this.inputFormat = inputFormat; + } + + @Column(name="output_format") + public String getOutputFormat() { + return outputFormat; + } + + public void setOutputFormat(String outputFormat) { + this.outputFormat = outputFormat; + } + + @Column(name="serialization_lib") + public String getSerializationLib() { + return serializationLib; + } + + public void setSerializationLib(String serializationLib) { + this.serializationLib = serializationLib; + } + + @Column(name="owner") + public String getOwner() { + return owner; + } + + public void setOwner(String owner) { + this.owner = owner; + } + + @ElementCollection + @MapKeyColumn(name="parameters_idx") + @Column(name="parameters_elt") + @CollectionTable(name="info_parameters") + public Map getParameters() { + return parameters; + } + + public void setParameters(Map parameters) { + this.parameters = parameters; + } + + @OneToOne + @JoinColumn(name="location_id", nullable=false) + public Location getLocation() { + return location; + } + + public void setLocation(Location location) { + this.location = location; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Location.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Location.java new file mode 100644 index 000000000..a0ac92403 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Location.java @@ -0,0 +1,65 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CascadeType; +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.JoinColumn; +import javax.persistence.OneToOne; +import javax.persistence.UniqueConstraint; + +/** + * Created 
by amajumdar on 12/22/14. + */ +@Entity +@javax.persistence.Table(name="location", + uniqueConstraints= @UniqueConstraint(name="location_u1",columnNames = "table_id")) +public class Location extends IdEntity{ + /* + static belongsTo = [table: Table] + static hasOne = [schema: Schema, info: Info] + //TODO: Serde info + String uri + */ + private String uri; + private Table table; + private Schema schema; + private Info info; + + @Column(name = "uri", nullable = true) + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @OneToOne + @JoinColumn(name="table_id", nullable=false) + public Table getTable() { + return table; + } + + public void setTable(Table table) { + this.table = table; + } + + @OneToOne(cascade= CascadeType.ALL, fetch= FetchType.EAGER, mappedBy = "location") + public Schema getSchema() { + return schema; + } + + public void setSchema(Schema schema) { + this.schema = schema; + } + + @OneToOne(cascade=CascadeType.ALL, fetch=FetchType.EAGER, mappedBy = "location") + public Info getInfo() { + return info; + } + + public void setInfo(Info info) { + this.info = info; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Partition.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Partition.java new file mode 100644 index 000000000..c946834ee --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Partition.java @@ -0,0 +1,80 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.Index; +import javax.persistence.JoinColumn; +import javax.persistence.ManyToOne; +import javax.persistence.NamedQueries; +import javax.persistence.NamedQuery; +import javax.persistence.UniqueConstraint; + +/** + * Created by amajumdar on 12/22/14. 
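+ *
+ * <p>The named queries cover lookups by table, partition count, uri (exact
+ * and prefix) and bulk delete by partition names. Delete sketch (values
+ * hypothetical, mirroring {@code PartitionDaoImpl#deleteByNames}):
+ * <pre>{@code
+ * Query query = em.createNamedQuery(Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES);
+ * query.setParameter("sourceName", "s3");
+ * query.setParameter("databaseName", "schema");
+ * query.setParameter("tableName", "table");
+ * query.setParameter("partitionNames", Lists.newArrayList("dateint=20150101"));
+ * query.executeUpdate();
+ * }</pre>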
+ */ +@Entity +@javax.persistence.Table(name="partition_table", + indexes = { @Index(name="partition_table_i1", columnList = "name"),@Index(name="partition_table_i2",columnList = "uri") }, + uniqueConstraints= @UniqueConstraint(name="partition_table_u1",columnNames = {"table_id", "name"})) +@NamedQueries({ + @NamedQuery( + name = Partition.NAME_QUERY_GET_FOR_TABLE, + query = "select p from Partition p where p.table.name=:tableName and p.table.database.name=:databaseName and p.table.database.source.name=:sourceName" + ), + @NamedQuery( + name = Partition.NAME_QUERY_GET_COUNT_FOR_TABLE, + query = "select count(p) from Partition p where p.table.name=:tableName and p.table.database.name=:databaseName and p.table.database.source.name=:sourceName" + ), + @NamedQuery( + name = Partition.NAME_QUERY_DELETE_BY_PARTITION_NAMES, + query = "delete from Partition p where p.table.id = (select t.id from Table t where t.name=:tableName and t.database.name=:databaseName and t.database.source.name=:sourceName) and p.name in (:partitionNames)" + ) + , + @NamedQuery( + name = Partition.NAME_QUERY_GET_BY_URI, + query = "select p from Partition p where p.uri=:uri" + ), + @NamedQuery( + name = Partition.NAME_QUERY_GET_BY_URI_PREFIX, + query = "select p from Partition p where p.uri like :uri" + ) +}) +public class Partition extends IdEntity{ + public static final String NAME_QUERY_GET_FOR_TABLE = "getForTable"; + public static final String NAME_QUERY_GET_COUNT_FOR_TABLE = "getCountForTable"; + public static final String NAME_QUERY_DELETE_BY_PARTITION_NAMES = "deleteByPartitionNames"; + public static final String NAME_QUERY_GET_BY_URI = "getByUri"; + public static final String NAME_QUERY_GET_BY_URI_PREFIX = "getByUriPrefix"; + private String name; + private String uri; + private Table table; + + @Column(name = "name", nullable = false) + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Column(name = "uri", nullable = false) + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @ManyToOne(fetch = FetchType.LAZY, optional=false ) + @JoinColumn(name = "table_id", nullable = false) + public Table getTable() { + return table; + } + + public void setTable(Table table) { + this.table = table; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Schema.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Schema.java new file mode 100644 index 000000000..881d2b177 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Schema.java @@ -0,0 +1,40 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CascadeType; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.JoinColumn; +import javax.persistence.OneToMany; +import javax.persistence.OneToOne; +import javax.persistence.UniqueConstraint; +import java.util.List; + +/** + * Created by amajumdar on 12/22/14. 
+ */ +@Entity +@javax.persistence.Table(name="schema_object", + uniqueConstraints= @UniqueConstraint(name="schema_object_u1",columnNames = "location_id")) +public class Schema extends IdEntity{ + private Location location; + private List fields; + + @OneToOne + @JoinColumn(name="location_id", nullable=false) + public Location getLocation() { + return location; + } + + public void setLocation(Location location) { + this.location = location; + } + + @OneToMany(cascade= CascadeType.ALL, fetch= FetchType.LAZY, mappedBy = "schema") + public List getFields() { + return fields; + } + + public void setFields(List fields) { + this.fields = fields; + } +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Source.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Source.java new file mode 100644 index 000000000..6b1726619 --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Source.java @@ -0,0 +1,68 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CascadeType; +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.OneToMany; +import javax.persistence.UniqueConstraint; +import java.util.List; + +/** + * Created by amajumdar on 12/19/14. + */ +@Entity +@javax.persistence.Table(name="source", + uniqueConstraints= @UniqueConstraint(name="source_u1",columnNames = "name")) +public class Source extends IdEntity{ + private String name; + private String type; + private String thriftUri; + private boolean disabled = false; + private List databases; + + @Column(name = "name", nullable = false) + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Column(name = "type", nullable = false) + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + @Column(name = "thrift_uri") + public String getThriftUri() { + return thriftUri; + } + + public void setThriftUri(String thriftUri) { + this.thriftUri = thriftUri; + } + + @Column(name = "disabled", nullable = false) + public boolean isDisabled() { + return disabled; + } + + public void setDisabled(boolean disabled) { + this.disabled = disabled; + } + + @OneToMany(cascade= CascadeType.ALL, fetch= FetchType.LAZY, mappedBy = "source") + public List getDatabases() { + return databases; + } + + public void setDatabases(List databases) { + this.databases = databases; + } +} diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Table.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Table.java new file mode 100644 index 000000000..453edfb6a --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/model/Table.java @@ -0,0 +1,50 @@ +package com.netflix.metacat.s3.connector.model; + +import javax.persistence.CascadeType; +import javax.persistence.Entity; +import javax.persistence.FetchType; +import javax.persistence.Index; +import javax.persistence.JoinColumn; +import javax.persistence.ManyToOne; +import javax.persistence.NamedQueries; +import javax.persistence.NamedQuery; +import javax.persistence.OneToOne; +import javax.persistence.UniqueConstraint; + +/** + * Created by amajumdar on 12/19/14. 
+ */ +@Entity +@javax.persistence.Table(name="table_object", + indexes = { @Index(name="table_object_i1", columnList = "name") }, + uniqueConstraints= @UniqueConstraint(name="table_object_u1", columnNames = {"database_id", "name"})) +@NamedQueries({ + @NamedQuery( + name = Table.NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES, + query = "select t from Table t where t.database.source.name=:sourceName and t.database.name=:databaseName and t.name in (:tableNames)" + ) +}) +public class Table extends BaseTable{ + public static final String NAME_QUERY_GET_BY_SOURCE_DATABASE_TABLE_NAMES = "getBySourceDatabaseTableNames"; + private Database database; + private Location location; + + @ManyToOne(fetch = FetchType.LAZY, optional=false) + @JoinColumn(name = "database_id", nullable = false) + public Database getDatabase() { + return database; + } + + public void setDatabase(Database database) { + this.database = database; + } + + @OneToOne(cascade=CascadeType.ALL, fetch=FetchType.EAGER, mappedBy = "table") + public Location getLocation() { + return location; + } + + public void setLocation(Location location) { + this.location = location; + } +} \ No newline at end of file diff --git a/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/util/ConverterUtil.java b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/util/ConverterUtil.java new file mode 100644 index 000000000..f5bd21e6c --- /dev/null +++ b/metacat-s3-connector/src/main/java/com/netflix/metacat/s3/connector/util/ConverterUtil.java @@ -0,0 +1,218 @@ +package com.netflix.metacat.s3.connector.util; + +import com.facebook.presto.spi.AuditInfo; +import com.facebook.presto.spi.ColumnDetailMetadata; +import com.facebook.presto.spi.ColumnMetadata; +import com.facebook.presto.spi.ConnectorPartition; +import com.facebook.presto.spi.ConnectorPartitionDetail; +import com.facebook.presto.spi.ConnectorTableDetailMetadata; +import com.facebook.presto.spi.ConnectorTableMetadata; +import com.facebook.presto.spi.StorageInfo; +import com.facebook.presto.spi.type.Type; +import com.facebook.presto.spi.type.TypeManager; +import com.facebook.presto.spi.type.TypeSignature; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.netflix.metacat.common.server.Config; +import com.netflix.metacat.converters.impl.PigTypeConverter; +import com.netflix.metacat.s3.connector.model.Field; +import com.netflix.metacat.s3.connector.model.Info; +import com.netflix.metacat.s3.connector.model.Location; +import com.netflix.metacat.s3.connector.model.Partition; +import com.netflix.metacat.s3.connector.model.Schema; +import com.netflix.metacat.s3.connector.model.Table; + +import javax.inject.Inject; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Created by amajumdar on 2/4/15. 
+ */ +public class ConverterUtil { + @Inject + private Config config; + @Inject + private TypeManager typeManager; + @Inject + private PigTypeConverter pigTypeConverter; + public StorageInfo toStorageInfo(Table table){ + StorageInfo result = null; + Location location = table.getLocation(); + if( location != null) { + result = new StorageInfo(); + result.setUri( location.getUri()); + Info info = location.getInfo(); + if( info != null){ + result.setInputFormat(info.getInputFormat()); + result.setOutputFormat(info.getOutputFormat()); + result.setSerializationLib(info.getSerializationLib()); + result.setParameters(Maps.newHashMap(info.getParameters())); + } + } + return result; + } + + public String getOwner(Table table){ + String result = null; + Location location = table.getLocation(); + if( location != null){ + Info info = location.getInfo(); + if(info != null){ + result = info.getOwner(); + } + } + return result; + } + + public Location fromStorageInfo(StorageInfo storageInfo, String owner){ + Location result = new Location(); + if( storageInfo != null) { + result.setUri( storageInfo.getUri()); + Info info = new Info(); + info.setLocation(result); + info.setOwner(owner); + info.setInputFormat( storageInfo.getInputFormat()); + info.setOutputFormat( storageInfo.getOutputFormat()); + info.setSerializationLib( storageInfo.getSerializationLib()); + info.setParameters( storageInfo.getParameters()); + result.setInfo(info); + } else if ( owner != null){ + Info info = new Info(); + info.setLocation(result); + info.setOwner(owner); + result.setInfo(info); + } + return result; + } + + public List toFields(ConnectorTableMetadata tableMetadata, Schema schema) { + ImmutableList.Builder columns = ImmutableList.builder(); + int index = 0; + for( ColumnMetadata column: tableMetadata.getColumns()){ + Field field = toField( column); + field.setPos(index++); + field.setSchema(schema); + columns.add( field); + } + return columns.build(); + } + + public Field toField(ColumnMetadata column) { + Field result = new Field(); + result.setName( column.getName()); + result.setPartitionKey( column.isPartitionKey()); + result.setComment(column.getComment()); + result.setSourceType( column.getType().getDisplayName()); + result.setType(toTypeString( column.getType())); + return result; + } + + private String toTypeString(Type type) { + String result = null; + if (config.isUsePigTypes()){ + result = pigTypeConverter.fromType(type); + } else { + result = type.getDisplayName(); + } + return result; + } + + public Type toType(String type) { + Type result = null; + if (config.isUsePigTypes()) { + //Hack for now. 
We need to correct the type format in Franklin + if ("map".equals(type)) { + type = "map[]"; + } + result = pigTypeConverter.toType(type, typeManager); + } else { + result = typeManager.getType(TypeSignature.parseTypeSignature(type)); + } + return result; + } + + public AuditInfo toAuditInfo(Table table) { + AuditInfo result = new AuditInfo(); + Location location = table.getLocation(); + if( location != null){ + Info info = location.getInfo(); + if( info != null) { + result.setCreatedBy(info.getOwner()); + result.setLastUpdatedBy(info.getOwner()); + } + } + result.setCreatedDate(table.getCreatedDate()==null?null:table.getCreatedDate().getTime()/1000); + result.setLastUpdatedDate(table.getLastUpdatedDate()==null?null:table.getLastUpdatedDate().getTime()/1000); + return result; + } + + public ColumnMetadata toColumnMetadata(Field field) { + return new ColumnDetailMetadata(field.getName(), toType( field.getType()), field.isPartitionKey(), field.getComment(), false, field.getType()); + } + + public List toColumnMetadatas(Table table) { + List result = Lists.newArrayList(); + Location location = table.getLocation(); + if( location != null){ + Schema schema = location.getSchema(); + if( schema != null){ + result = schema.getFields().stream().sorted(Comparator.comparing(Field::getPos)).map(this::toColumnMetadata).collect(Collectors.toList()); + } + } + return result; + } + + public Location toLocation(ConnectorTableMetadata tableMetadata) { + Location location = null; + if( tableMetadata instanceof ConnectorTableDetailMetadata){ + ConnectorTableDetailMetadata tableDetailMetadata = (ConnectorTableDetailMetadata) tableMetadata; + location = fromStorageInfo(tableDetailMetadata.getStorageInfo(), tableDetailMetadata.getOwner()); + } else { + location = new Location(); + Info info = new Info(); + info.setLocation(location); + info.setOwner( tableMetadata.getOwner()); + location.setInfo(info); + } + Schema schema = new Schema(); + schema.setLocation(location); + schema.setFields(toFields(tableMetadata, schema)); + location.setSchema(schema); + return location; + } + + public Partition toPartition(Table table, ConnectorPartition partition) { + Partition result = new Partition(); + result.setTable(table); + result.setName(partition.getPartitionId()); + result.setUri(getUri(partition)); + return result; + } + + public String getUri(ConnectorPartition partition) { + String result = null; + if( partition instanceof ConnectorPartitionDetail) { + ConnectorPartitionDetail partitionDetail = (ConnectorPartitionDetail) partition; + if (partitionDetail.getStorageInfo() != null) { + result = partitionDetail.getStorageInfo().getUri(); + } + } + return result; + } + + + public List partitionKeys(Table table){ + List result = Lists.newArrayList(); + if( table.getLocation() != null){ + Schema schema = table.getLocation().getSchema(); + if( schema != null){ + List fields = schema.getFields(); + result = fields.stream().filter(Field::isPartitionKey).map(Field::getName).collect(Collectors.toList()); + } + } + return result; + } +} diff --git a/metacat-s3-connector/src/main/resources/META-INF/persistence.xml b/metacat-s3-connector/src/main/resources/META-INF/persistence.xml new file mode 100644 index 000000000..d0c9b6f03 --- /dev/null +++ b/metacat-s3-connector/src/main/resources/META-INF/persistence.xml @@ -0,0 +1,18 @@ + + + + org.hibernate.jpa.HibernatePersistenceProvider + com.netflix.metacat.s3.connector.model.BaseEntity + com.netflix.metacat.s3.connector.model.BaseTable + 
com.netflix.metacat.s3.connector.model.Database + com.netflix.metacat.s3.connector.model.Field + com.netflix.metacat.s3.connector.model.IdEntity + com.netflix.metacat.s3.connector.model.Info + com.netflix.metacat.s3.connector.model.Location + com.netflix.metacat.s3.connector.model.Partition + com.netflix.metacat.s3.connector.model.Schema + com.netflix.metacat.s3.connector.model.Source + com.netflix.metacat.s3.connector.model.Table + com.netflix.metacat.s3.connector.model.View + + \ No newline at end of file diff --git a/metacat-s3-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin b/metacat-s3-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin new file mode 100644 index 000000000..e075d2fbf --- /dev/null +++ b/metacat-s3-connector/src/main/resources/META-INF/services/com.facebook.presto.spi.Plugin @@ -0,0 +1 @@ +com.netflix.metacat.s3.connector.S3Plugin \ No newline at end of file diff --git a/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/BaseSpec.groovy b/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/BaseSpec.groovy new file mode 100644 index 000000000..5f2cc9eaa --- /dev/null +++ b/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/BaseSpec.groovy @@ -0,0 +1,102 @@ +package com.netflix.metacat.s3.connector + +import com.google.inject.Inject +import com.google.inject.persist.PersistService +import com.netflix.metacat.common.server.CommonModule +import com.netflix.metacat.converters.ConvertersModule +import io.airlift.testing.mysql.TestingMySqlServer +import spock.guice.UseModules +import spock.lang.Ignore +import spock.lang.Shared +import spock.lang.Specification + +import java.sql.Connection +import java.sql.DriverManager +import java.sql.SQLException +import java.sql.Statement +import java.util.concurrent.atomic.AtomicBoolean + +@UseModules([ + CommonModule.class, + S3TestModule.class, + ConvertersModule.class +]) +@Ignore +class BaseSpec extends Specification { + private static final AtomicBoolean initialized = new AtomicBoolean(); + @Shared @Inject + TestingMySqlServer mysqlServer; + @Shared @Inject + PersistService persistService + + def setupSpec() { + if (!initialized.compareAndSet(false, true)) { + return; + } + setupMysql() + } + + def setupMysql() { + File prepareFile = new File('src/test/resources/sql/prepare-test.sql') + if( !prepareFile.exists()){ + prepareFile = new File('metacat-s3-connector/src/test/resources/sql/prepare-test.sql') + } + runScript(DriverManager.getConnection(mysqlServer.getJdbcUrl()), new FileReader(prepareFile), ';') + + persistService.start() + } + + def runScript(Connection conn, Reader reader, String delimiter) throws IOException, + SQLException { + StringBuffer command = null; + try { + LineNumberReader lineReader = new LineNumberReader(reader); + String line = null; + while ((line = lineReader.readLine()) != null) { + if (command == null) { + command = new StringBuffer(); + } + String trimmedLine = line.trim(); + if (trimmedLine.startsWith("--")) { + println(trimmedLine); + } else if (trimmedLine.length() < 1 + || trimmedLine.startsWith("//")) { + // Do nothing + } else if (trimmedLine.length() < 1 + || trimmedLine.startsWith("--")) { + // Do nothing + } else if (trimmedLine.endsWith(delimiter)) { + command.append(line.substring(0, line + .lastIndexOf(delimiter))); + command.append(" "); + Statement statement = conn.createStatement(); + + println(command); + statement.execute(command.toString()); + + command = null; + try { + 
statement.close(); + } catch (Exception e) { + // Ignore to workaround a bug in Jakarta DBCP + } + Thread.yield(); + } else { + command.append(line); + command.append(" "); + } + } + } catch (Exception e) { + throw e; + } + } + + def cleanupSpec() { + if( persistService != null){ + persistService.stop() + } + if (mysqlServer != null) { + mysqlServer.close() + } + } +} diff --git a/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/S3TestModule.groovy b/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/S3TestModule.groovy new file mode 100644 index 000000000..1ddf04c28 --- /dev/null +++ b/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/S3TestModule.groovy @@ -0,0 +1,55 @@ +package com.netflix.metacat.s3.connector + +import com.facebook.presto.hive.HiveClientModule +import com.facebook.presto.type.TypeRegistry +import com.google.common.collect.Maps +import com.google.inject.Binder +import com.google.inject.Module +import com.google.inject.persist.jpa.JpaPersistModule +import com.google.inject.util.Modules +import io.airlift.configuration.ConfigurationFactory +import io.airlift.testing.mysql.TestingMySqlServer + +import java.nio.file.Files +import java.nio.file.Path +import java.nio.file.Paths + +import static java.lang.String.format + +/** + * Created by amajumdar on 10/12/15. + */ +class S3TestModule implements Module{ + @Override + void configure(Binder binder) { + TestingMySqlServer mysqlServer = new TestingMySqlServer("test", "test", "metacat") + Properties props = new Properties() + props.setProperty('javax.persistence.jdbc.url', format("jdbc:mysql://localhost:%d/%s?user=%s&password=%s", mysqlServer.port, "metacat", mysqlServer.user, mysqlServer.password)) + props.setProperty('javax.persistence.jdbc.user', mysqlServer.getUser()) + props.setProperty('javax.persistence.jdbc.password', mysqlServer.getPassword()) + props.setProperty('javax.persistence.jdbc.driver', 'com.mysql.jdbc.Driver') + props.setProperty('javax.jdo.option.defaultTransactionIsolation','READ_COMMITTED') + props.setProperty('javax.jdo.option.defaultAutoCommit', 'false'); + props.setProperty('javax.persistence.schema-generation.database.action', 'drop-and-create') + + URL url = Thread.currentThread().getContextClassLoader().getResource("s3.properties") + Path filePath + if( url != null) { + filePath = Paths.get(url.toURI()); + } else { + File metadataFile = new File('src/test/resources/s3.properties') + if( !metadataFile.exists()){ + metadataFile = new File('metacat-s3-connector/src/test/resources/s3.properties') + } + filePath = Paths.get(metadataFile.getPath()) + } + props.store(Files.newOutputStream(filePath), "test") + new JpaPersistModule("s3").properties(props).configure(binder) + binder.bind(TestingMySqlServer.class).toInstance(mysqlServer) + + binder.bind(ConfigurationFactory.class).toInstance(new ConfigurationFactory(Maps.newHashMap())) + HiveClientModule hiveClientModule = new HiveClientModule("s3", null, new TypeRegistry()) + Module module = Modules.override(hiveClientModule).with(new S3Module()); + module.configure(binder) + } +} diff --git a/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/dao/impl/DatabaseDaoImplSpec.groovy b/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/dao/impl/DatabaseDaoImplSpec.groovy new file mode 100644 index 000000000..ed5640374 --- /dev/null +++ b/metacat-s3-connector/src/test/groovy/com/netflix/metacat/s3/connector/dao/impl/DatabaseDaoImplSpec.groovy @@ -0,0 +1,61 @@ +package 
com.netflix.metacat.s3.connector.dao.impl + +import com.netflix.metacat.s3.connector.BaseSpec +import com.netflix.metacat.s3.connector.dao.DatabaseDao +import com.netflix.metacat.s3.connector.dao.SourceDao +import com.netflix.metacat.s3.connector.model.Database +import com.netflix.metacat.s3.connector.model.Source + +import javax.inject.Inject +import javax.persistence.EntityManager + +/** + * Created by amajumdar on 10/12/15. + */ +class DatabaseDaoImplSpec extends BaseSpec{ + @Inject + DatabaseDao databaseDao + @Inject + SourceDao sourceDao + @Inject + EntityManager em + def testAll(){ + given: + def source = new Source(name:'s3', type:'s3') + def database = new Database(name: 'test', source: source) + def tx = em.getTransaction() + when: + tx.begin() + sourceDao.save(source, true) + databaseDao.save( database, true) + tx.commit() + then: + databaseDao.getBySourceDatabaseName('s3', 'test') != null + when: + tx.begin() + databaseDao.deleteById(database.id) + tx.commit() + then: + databaseDao.get(database.id) == null + } + + def testCascade(){ + given: + def source = new Source(name:'c3', type:'c3') + def database = new Database(name: 'cascade', source: source) + source.setDatabases([database]) + def tx = em.getTransaction() + when: + tx.begin() + sourceDao.save(source, true) + tx.commit() + then: + databaseDao.getBySourceDatabaseName('c3', 'cascade') != null + when: + tx.begin() + sourceDao.delete([source]) + tx.commit() + then: + databaseDao.get([database.id]).size() == 0 + } +} diff --git a/metacat-s3-connector/src/test/resources/s3.properties b/metacat-s3-connector/src/test/resources/s3.properties new file mode 100644 index 000000000..150cb11dd --- /dev/null +++ b/metacat-s3-connector/src/test/resources/s3.properties @@ -0,0 +1,9 @@ +#test +#Wed Nov 04 17:32:17 PST 2015 +javax.jdo.option.defaultAutoCommit=false +javax.persistence.jdbc.user=test +javax.persistence.jdbc.url=jdbc\:mysql\://localhost\:51849/metacat?user\=test&password\=test +javax.persistence.jdbc.driver=com.mysql.jdbc.Driver +javax.jdo.option.defaultTransactionIsolation=READ_COMMITTED +javax.persistence.schema-generation.database.action=drop-and-create +javax.persistence.jdbc.password=test diff --git a/metacat-s3-connector/src/test/resources/sql/prepare-test.sql b/metacat-s3-connector/src/test/resources/sql/prepare-test.sql new file mode 100644 index 000000000..ea04d6525 --- /dev/null +++ b/metacat-s3-connector/src/test/resources/sql/prepare-test.sql @@ -0,0 +1,2 @@ +CREATE SCHEMA if not exists metacat; + diff --git a/metacat-server/build.gradle b/metacat-server/build.gradle new file mode 100644 index 000000000..44c1d9540 --- /dev/null +++ b/metacat-server/build.gradle @@ -0,0 +1,50 @@ +apply plugin: "war" +apply plugin: 'jetty' + +configurations { + all*.exclude module: "slf4j-jdk14" + all*.exclude module: "log4j-over-slf4j" + all*.exclude module: "jsr311-api" + all*.exclude module: "logback-classic" + all*.exclude module: "logback-core" +} + +dependencies { + compile project(':metacat-main') + compile project(':metacat-user-metadata-mysql') + compile 'com.netflix.blitz4j:blitz4j:1.36.0' + compile('com.squarespace.jersey2-guice:jersey2-guice:0.5') { + exclude group: 'com.google.inject' + exclude group: 'com.google.inject.extensions' + } + compile "org.glassfish.jersey.containers:jersey-container-servlet:${jersey_version}" + compile "org.glassfish.jersey.media:jersey-media-json-jackson:${jersey_version}" + compile("com.wordnik:swagger-jersey2-jaxrs_2.10:${swagger_version}") { + exclude group: 'com.sun.jersey' + exclude 
diff --git a/metacat-server/build.gradle b/metacat-server/build.gradle
new file mode 100644
index 000000000..44c1d9540
--- /dev/null
+++ b/metacat-server/build.gradle
@@ -0,0 +1,50 @@
+apply plugin: 'war'
+apply plugin: 'jetty'
+
+configurations {
+    all*.exclude module: 'slf4j-jdk14'
+    all*.exclude module: 'log4j-over-slf4j'
+    all*.exclude module: 'jsr311-api'
+    all*.exclude module: 'logback-classic'
+    all*.exclude module: 'logback-core'
+}
+
+dependencies {
+    compile project(':metacat-main')
+    compile project(':metacat-user-metadata-mysql')
+    compile 'com.netflix.blitz4j:blitz4j:1.36.0'
+    compile('com.squarespace.jersey2-guice:jersey2-guice:0.5') {
+        exclude group: 'com.google.inject'
+        exclude group: 'com.google.inject.extensions'
+    }
+    compile "org.glassfish.jersey.containers:jersey-container-servlet:${jersey_version}"
+    compile "org.glassfish.jersey.media:jersey-media-json-jackson:${jersey_version}"
+    compile("com.wordnik:swagger-jersey2-jaxrs_2.10:${swagger_version}") {
+        exclude group: 'com.sun.jersey'
+        exclude group: 'javax.servlet'
+    }
+    runtime "org.slf4j:slf4j-log4j12:${slf4j_version}"
+
+    providedCompile 'javax.servlet:javax.servlet-api:3.1.0'
+}
+
+task(console, dependsOn: 'testClasses', type: JavaExec) {
+    main = 'groovy.ui.Console'
+    classpath = sourceSets.test.runtimeClasspath
+}
+
+war {
+    baseName = 'metacat'
+}
+
+jettyRunWar {
+    httpPort = 7001
+    contextPath = ''
+}
+
+task buildAndCopyWar(type: Exec, dependsOn: 'war') {
+    workingDir rootDir
+
+    executable 'sh'
+    args '-c', 'mkdir build || true && find . -type f -name "*.war" | xargs ls -r | head -1 | xargs -J % cp % build/metacat.war'
+}
diff --git a/metacat-server/src/main/java/com/netflix/metacat/server/init/MetacatContextListener.java b/metacat-server/src/main/java/com/netflix/metacat/server/init/MetacatContextListener.java
new file mode 100644
index 000000000..2841a4271
--- /dev/null
+++ b/metacat-server/src/main/java/com/netflix/metacat/server/init/MetacatContextListener.java
@@ -0,0 +1,82 @@
+package com.netflix.metacat.server.init;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.inject.Module;
+import com.netflix.blitz4j.LoggingConfiguration;
+import com.netflix.config.ConfigurationManager;
+import com.netflix.metacat.common.server.Config;
+import com.netflix.metacat.main.init.MetacatInitializationService;
+import com.netflix.metacat.main.init.MetacatServletModule;
+import com.netflix.metacat.usermetadata.mysql.MysqlUserMetadataModule;
+import com.squarespace.jersey2.guice.JerseyGuiceServletContextListener;
+import com.wordnik.swagger.jaxrs.config.BeanConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletContextEvent;
+import java.io.IOException;
+import java.util.List;
+
+public class MetacatContextListener extends JerseyGuiceServletContextListener {
+    static {
+        // Initialize configuration
+        System.setProperty("archaius.deployment.applicationId", "metacat");
+        try {
+            ConfigurationManager.loadCascadedPropertiesFromResources("metacat");
+        } catch (IOException ignored) {
+            // Do not stop the server initialization
+        }
+        // Initialize logging
+        LoggingConfiguration.getInstance().configure();
+    }
+
+    private static final Logger log = LoggerFactory.getLogger(MetacatContextListener.class);
+
+    @Override
+    public void contextDestroyed(ServletContextEvent sce) {
+        log.info("Start contextDestroyed");
+        super.contextDestroyed(sce);
+        // Stop logging
+        LoggingConfiguration.getInstance().stop();
+        MetacatInitializationService service = getInjector().getInstance(MetacatInitializationService.class);
+        try {
+            service.stop();
+        } catch (Throwable t) {
+            log.error("Error stopping Metacat", t);
+            throw Throwables.propagate(t);
+        }
+        log.info("Finish contextDestroyed");
+    }
+
+    @Override
+    public void contextInitialized(ServletContextEvent sce) {
+        log.info("Start contextInitialized");
+        super.contextInitialized(sce);
+
+        Config config = getInjector().getInstance(Config.class);
+        MetacatInitializationService service = getInjector().getInstance(MetacatInitializationService.class);
+        try {
+            service.start();
+        } catch (Throwable t) {
+            log.error("Error initializing Metacat", t);
+            throw Throwables.propagate(t);
+        }
+        // Configure and initialize Swagger: scan com.netflix.metacat for annotated JAX-RS endpoints
+        BeanConfig beanConfig = new BeanConfig();
+        beanConfig.setVersion(config.getMetacatVersion());
+        beanConfig.setBasePath("/");
+        beanConfig.setResourcePackage("com.netflix.metacat");
+        beanConfig.setScan(true);
+
+        log.info("Finish contextInitialized");
+    }
+
+    @Override
+    protected List<? extends Module> modules() {
+        return ImmutableList.of(
+            new MetacatServletModule(),
+            new MysqlUserMetadataModule()
+        );
+    }
+}
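
[Editor's note] The static initializer above relies on Archaius cascaded loading: loadCascadedPropertiesFromResources("metacat") reads metacat.properties from the classpath and then overlays environment-specific variants when a deployment environment is set. A small illustrative sketch; the "prod" environment value and the metacat-prod.properties file name are assumptions, not part of this change:

    // Illustrative sketch of Archaius cascaded configuration loading. With the
    // deployment environment set, metacat-prod.properties (assumed name) is
    // layered over the base metacat.properties from the classpath.
    import com.netflix.config.ConfigurationManager;

    public class ConfigExample {
        public static void main(String[] args) throws Exception {
            System.setProperty("archaius.deployment.applicationId", "metacat");
            System.setProperty("archaius.deployment.environment", "prod");
            ConfigurationManager.loadCascadedPropertiesFromResources("metacat");
            // Resolved values are then available through Archaius:
            String location = ConfigurationManager.getConfigInstance()
                .getString("metacat.plugin.config.location", "/etc/catalog");
            System.out.println(location);
        }
    }
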
diff --git a/metacat-server/src/main/java/com/netflix/metacat/server/init/MetacatJsonProvider.java b/metacat-server/src/main/java/com/netflix/metacat/server/init/MetacatJsonProvider.java
new file mode 100644
index 000000000..8b27d8249
--- /dev/null
+++ b/metacat-server/src/main/java/com/netflix/metacat/server/init/MetacatJsonProvider.java
@@ -0,0 +1,19 @@
+package com.netflix.metacat.server.init;
+
+import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
+import com.netflix.metacat.common.json.MetacatJsonLocator;
+
+import javax.annotation.Priority;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.ext.Provider;
+
+@Provider
+@Produces(MediaType.APPLICATION_JSON)
+@Priority(10_000)
+public class MetacatJsonProvider extends JacksonJaxbJsonProvider {
+    public MetacatJsonProvider() {
+        super();
+        setMapper(MetacatJsonLocator.INSTANCE.getObjectMapper());
+    }
+}
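
[Editor's note] MetacatJsonProvider registers at high priority so Jersey prefers Metacat's shared ObjectMapper over the default Jackson provider, keeping REST payloads consistent with the server's internal JSON handling. A sketch of what that consistency buys, using the locator from this diff; the payload map is illustrative:

    // Illustrative: any code path that serializes with the shared mapper
    // produces the same JSON shape the REST layer emits.
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.netflix.metacat.common.json.MetacatJsonLocator;
    import java.util.Collections;

    public class JsonExample {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = MetacatJsonLocator.INSTANCE.getObjectMapper();
            String json = mapper.writeValueAsString(Collections.singletonMap("name", "test"));
            System.out.println(json);
        }
    }
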
diff --git a/metacat-server/src/main/java/com/netflix/metacat/server/jersey/MetacatRestFilter.java b/metacat-server/src/main/java/com/netflix/metacat/server/jersey/MetacatRestFilter.java
new file mode 100644
index 000000000..b0eeb63e4
--- /dev/null
+++ b/metacat-server/src/main/java/com/netflix/metacat/server/jersey/MetacatRestFilter.java
@@ -0,0 +1,41 @@
+package com.netflix.metacat.server.jersey;
+
+import com.netflix.metacat.common.MetacatContext;
+import com.netflix.metacat.common.util.MetacatContextManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.container.ContainerRequestContext;
+import javax.ws.rs.container.ContainerRequestFilter;
+import javax.ws.rs.container.ContainerResponseContext;
+import javax.ws.rs.container.ContainerResponseFilter;
+import javax.ws.rs.ext.Provider;
+import java.io.IOException;
+
+/**
+ * Created by amajumdar on 8/3/15.
+ */
+@Provider
+public class MetacatRestFilter implements ContainerRequestFilter, ContainerResponseFilter {
+    private static final Logger log = LoggerFactory.getLogger(MetacatRestFilter.class);
+    @Override
+    public void filter(ContainerRequestContext requestContext) throws IOException {
+        String userName = requestContext.getHeaderString(MetacatContext.HEADER_KEY_USER_NAME);
+        if (userName == null) {
+            userName = "metacat";
+        }
+        String clientAppName = requestContext.getHeaderString(MetacatContext.HEADER_KEY_CLIENT_APP_NAME);
+        String clientHost = requestContext.getHeaderString("X-Forwarded-For");
+        String jobId = requestContext.getHeaderString(MetacatContext.HEADER_KEY_JOB_ID);
+        String dataTypeContext = requestContext.getHeaderString(MetacatContext.HEADER_KEY_DATA_TYPE_CONTEXT);
+        MetacatContext context = new MetacatContext(userName, clientAppName, clientHost, jobId, dataTypeContext);
+        MetacatContextManager.setContext(context);
+        log.info(context.toString());
+    }
+
+    @Override
+    public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext)
+        throws IOException {
+        MetacatContextManager.removeContext();
+    }
+}
diff --git a/metacat-server/src/main/resources/log4j.properties b/metacat-server/src/main/resources/log4j.properties
new file mode 100644
index 000000000..ebc38dcd6
--- /dev/null
+++ b/metacat-server/src/main/resources/log4j.properties
@@ -0,0 +1,25 @@
+# Logging
+log4j.rootLogger=DEBUG, stdout
+log4j.rootCategory=INFO,EventCounter
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MMM-dd HH:mm:ss,SSS} [%t] %c %x %-5p %m%n
+log4j.logger.asyncAppenders=INFO, stdout
+log4j.logger.com.netflix=INFO
+log4j.logger.httpclient=ERROR
+log4j.logger.org.apache=ERROR
+log4j.logger.DataNucleus=ERROR
+log4j.logger.Datastore=ERROR
+log4j.logger.Datastore.Schema=ERROR
+log4j.logger.JPOX.Datastore=ERROR
+log4j.logger.JPOX.Plugin=ERROR
+log4j.logger.JPOX.MetaData=ERROR
+log4j.logger.JPOX.Query=ERROR
+log4j.logger.JPOX.General=ERROR
+log4j.logger.JPOX.Enhancer=ERROR
+log4j.logger.org.jets3t=WARN
+log4j.logger.com.netflix.monitoring.plugins.AtlasWriter=ERROR
+log4j.logger.com.wordnik=ERROR
+log4j.logger.org.atmosphere=ERROR
diff --git a/metacat-server/src/main/resources/metacat.properties b/metacat-server/src/main/resources/metacat.properties
new file mode 100644
index 000000000..a6d1180e4
--- /dev/null
+++ b/metacat-server/src/main/resources/metacat.properties
@@ -0,0 +1,5 @@
+## Application info
+netflix.appinfo.name=metacat
+netflix.appinfo.version=1.0
+
+metacat.plugin.config.location=/etc/catalog
diff --git a/metacat-server/src/main/webapp/WEB-INF/web.xml b/metacat-server/src/main/webapp/WEB-INF/web.xml
new file mode 100644
index 000000000..0d5d4e37a
--- /dev/null
+++ b/metacat-server/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<web-app>
+    <filter>
+        <filter-name>guiceFilter</filter-name>
+        <filter-class>com.google.inject.servlet.GuiceFilter</filter-class>
+    </filter>
+    <filter-mapping>
+        <filter-name>guiceFilter</filter-name>
+        <url-pattern>/*</url-pattern>
+    </filter-mapping>
+    <servlet>
+        <servlet-name>jersey</servlet-name>
+        <servlet-class>org.glassfish.jersey.servlet.ServletContainer</servlet-class>
+        <init-param>
+            <param-name>jersey.config.server.provider.packages</param-name>
+            <param-value>
+                com.netflix.metacat,
+                com.wordnik.swagger.jaxrs.json,
+                com.sun.jersey
+            </param-value>
+        </init-param>
+        <init-param>
+            <param-name>jersey.config.server.provider.classnames</param-name>
+            <param-value>
+                com.wordnik.swagger.jersey.listing.ApiListingResourceJSON,
+                com.wordnik.swagger.jersey.listing.JerseyApiDeclarationProvider,
+                com.wordnik.swagger.jersey.listing.JerseyResourceListingProvider
+            </param-value>
+        </init-param>
+        <init-param>
+            <param-name>com.sun.jersey.api.json.POJOMappingFeature</param-name>
+            <param-value>true</param-value>
+        </init-param>
+        <init-param>
+            <param-name>jersey.config.servlet.filter.staticContentRegex</param-name>
+            <param-value>/(web|docs)/.*|/favicon.ico</param-value>
+        </init-param>
+    </servlet>
+    <servlet-mapping>
+        <servlet-name>jersey</servlet-name>
+        <url-pattern>/*</url-pattern>
+    </servlet-mapping>
+    <listener>
+        <listener-class>com.netflix.metacat.server.init.MetacatContextListener</listener-class>
+    </listener>
+</web-app>
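
[Editor's note] MetacatRestFilter above sets a per-request MetacatContext from HTTP headers and clears it when the response leaves. MetacatContextManager's implementation is not part of this hunk; the usual pattern it implies is a thread-local holder, sketched here. Names mirror the diff; the body is assumed:

    // Sketch of the thread-local holder MetacatContextManager presumably wraps.
    // The response filter must call removeContext(), otherwise contexts leak
    // across pooled request threads.
    import com.netflix.metacat.common.MetacatContext;

    public final class ContextHolder {
        private static final ThreadLocal<MetacatContext> CONTEXT = new ThreadLocal<MetacatContext>();

        private ContextHolder() {}

        public static void setContext(MetacatContext context) { CONTEXT.set(context); }

        public static MetacatContext getContext() { return CONTEXT.get(); }

        public static void removeContext() { CONTEXT.remove(); }
    }
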
diff --git a/metacat-server/src/main/webapp/docs/api/css/reset.css b/metacat-server/src/main/webapp/docs/api/css/reset.css
new file mode 100644
index 000000000..b2b078943
--- /dev/null
+++ b/metacat-server/src/main/webapp/docs/api/css/reset.css
@@ -0,0 +1,125 @@
+/* http://meyerweb.com/eric/tools/css/reset/ v2.0 | 20110126 */
+html,
+body,
+div,
+span,
+applet,
+object,
+iframe,
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+p,
+blockquote,
+pre,
+a,
+abbr,
+acronym,
+address,
+big,
+cite,
+code,
+del,
+dfn,
+em,
+img,
+ins,
+kbd,
+q,
+s,
+samp,
+small,
+strike,
+strong,
+sub,
+sup,
+tt,
+var,
+b,
+u,
+i,
+center,
+dl,
+dt,
+dd,
+ol,
+ul,
+li,
+fieldset,
+form,
+label,
+legend,
+table,
+caption,
+tbody,
+tfoot,
+thead,
+tr,
+th,
+td,
+article,
+aside,
+canvas,
+details,
+embed,
+figure,
+figcaption,
+footer,
+header,
+hgroup,
+menu,
+nav,
+output,
+ruby,
+section,
+summary,
+time,
+mark,
+audio,
+video {
+  margin: 0;
+  padding: 0;
+  border: 0;
+  font-size: 100%;
+  font: inherit;
+  vertical-align: baseline;
+}
+/* HTML5 display-role reset for older browsers */
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+menu,
+nav,
+section {
+  display: block;
+}
+body {
+  line-height: 1;
+}
+ol,
+ul {
+  list-style: none;
+}
+blockquote,
+q {
+  quotes: none;
+}
+blockquote:before,
+blockquote:after,
+q:before,
+q:after {
+  content: '';
+  content: none;
+}
+table {
+  border-collapse: collapse;
+  border-spacing: 0;
+}
diff --git a/metacat-server/src/main/webapp/docs/api/css/screen.css b/metacat-server/src/main/webapp/docs/api/css/screen.css
new file mode 100644
index 000000000..8052c4b32
--- /dev/null
+++ b/metacat-server/src/main/webapp/docs/api/css/screen.css
@@ -0,0 +1,1221 @@
+/* Original style from softwaremaniacs.org (c) Ivan Sagalaev */
+.swagger-section pre code {
+  display: block;
+  padding: 0.5em;
+  background: #F0F0F0;
+}
+.swagger-section pre code,
+.swagger-section pre .subst,
+.swagger-section pre .tag .title,
+.swagger-section pre .lisp .title,
+.swagger-section pre .clojure .built_in,
+.swagger-section pre .nginx .title {
+  color: black;
+}
+.swagger-section pre .string,
+.swagger-section pre .title,
+.swagger-section pre .constant,
+.swagger-section pre .parent,
+.swagger-section pre .tag .value,
+.swagger-section pre .rules .value,
+.swagger-section pre .rules .value .number,
+.swagger-section pre .preprocessor,
+.swagger-section pre .ruby .symbol,
+.swagger-section pre .ruby .symbol .string,
+.swagger-section pre .aggregate,
+.swagger-section pre .template_tag,
+.swagger-section pre .django .variable,
+.swagger-section pre .smalltalk .class,
+.swagger-section pre .addition,
+.swagger-section pre .flow,
+.swagger-section pre .stream,
+.swagger-section pre .bash .variable,
+.swagger-section pre .apache .tag,
+.swagger-section pre .apache .cbracket,
+.swagger-section pre .tex .command,
+.swagger-section pre .tex .special,
+.swagger-section pre .erlang_repl .function_or_atom,
+.swagger-section pre .markdown .header {
+  color: #800;
+}
+.swagger-section pre .comment,
+.swagger-section pre .annotation,
+.swagger-section pre .template_comment,
+.swagger-section pre .diff .header,
+.swagger-section pre .chunk,
+.swagger-section pre .markdown .blockquote {
+  color: #888;
+}
+.swagger-section pre 
.number, +.swagger-section pre .date, +.swagger-section pre .regexp, +.swagger-section pre .literal, +.swagger-section pre .smalltalk .symbol, +.swagger-section pre .smalltalk .char, +.swagger-section pre .go .constant, +.swagger-section pre .change, +.swagger-section pre .markdown .bullet, +.swagger-section pre .markdown .link_url { + color: #080; +} +.swagger-section pre .label, +.swagger-section pre .javadoc, +.swagger-section pre .ruby .string, +.swagger-section pre .decorator, +.swagger-section pre .filter .argument, +.swagger-section pre .localvars, +.swagger-section pre .array, +.swagger-section pre .attr_selector, +.swagger-section pre .important, +.swagger-section pre .pseudo, +.swagger-section pre .pi, +.swagger-section pre .doctype, +.swagger-section pre .deletion, +.swagger-section pre .envvar, +.swagger-section pre .shebang, +.swagger-section pre .apache .sqbracket, +.swagger-section pre .nginx .built_in, +.swagger-section pre .tex .formula, +.swagger-section pre .erlang_repl .reserved, +.swagger-section pre .prompt, +.swagger-section pre .markdown .link_label, +.swagger-section pre .vhdl .attribute, +.swagger-section pre .clojure .attribute, +.swagger-section pre .coffeescript .property { + color: #8888ff; +} +.swagger-section pre .keyword, +.swagger-section pre .id, +.swagger-section pre .phpdoc, +.swagger-section pre .title, +.swagger-section pre .built_in, +.swagger-section pre .aggregate, +.swagger-section pre .css .tag, +.swagger-section pre .javadoctag, +.swagger-section pre .phpdoc, +.swagger-section pre .yardoctag, +.swagger-section pre .smalltalk .class, +.swagger-section pre .winutils, +.swagger-section pre .bash .variable, +.swagger-section pre .apache .tag, +.swagger-section pre .go .typename, +.swagger-section pre .tex .command, +.swagger-section pre .markdown .strong, +.swagger-section pre .request, +.swagger-section pre .status { + font-weight: bold; +} +.swagger-section pre .markdown .emphasis { + font-style: italic; +} +.swagger-section pre .nginx .built_in { + font-weight: normal; +} +.swagger-section pre .coffeescript .javascript, +.swagger-section pre .javascript .xml, +.swagger-section pre .tex .formula, +.swagger-section pre .xml .javascript, +.swagger-section pre .xml .vbscript, +.swagger-section pre .xml .css, +.swagger-section pre .xml .cdata { + opacity: 0.5; +} +.swagger-section .swagger-ui-wrap { + line-height: 1; + font-family: "Droid Sans", sans-serif; + max-width: 960px; + margin-left: auto; + margin-right: auto; +} +.swagger-section .swagger-ui-wrap b, +.swagger-section .swagger-ui-wrap strong { + font-family: "Droid Sans", sans-serif; + font-weight: bold; +} +.swagger-section .swagger-ui-wrap q, +.swagger-section .swagger-ui-wrap blockquote { + quotes: none; +} +.swagger-section .swagger-ui-wrap p { + line-height: 1.4em; + padding: 0 0 10px; + color: #333333; +} +.swagger-section .swagger-ui-wrap q:before, +.swagger-section .swagger-ui-wrap q:after, +.swagger-section .swagger-ui-wrap blockquote:before, +.swagger-section .swagger-ui-wrap blockquote:after { + content: none; +} +.swagger-section .swagger-ui-wrap .heading_with_menu h1, +.swagger-section .swagger-ui-wrap .heading_with_menu h2, +.swagger-section .swagger-ui-wrap .heading_with_menu h3, +.swagger-section .swagger-ui-wrap .heading_with_menu h4, +.swagger-section .swagger-ui-wrap .heading_with_menu h5, +.swagger-section .swagger-ui-wrap .heading_with_menu h6 { + display: block; + clear: none; + float: left; + -moz-box-sizing: border-box; + -webkit-box-sizing: border-box; + 
-ms-box-sizing: border-box; + box-sizing: border-box; + width: 60%; +} +.swagger-section .swagger-ui-wrap table { + border-collapse: collapse; + border-spacing: 0; +} +.swagger-section .swagger-ui-wrap table thead tr th { + padding: 5px; + font-size: 0.9em; + color: #666666; + border-bottom: 1px solid #999999; +} +.swagger-section .swagger-ui-wrap table tbody tr:last-child td { + border-bottom: none; +} +.swagger-section .swagger-ui-wrap table tbody tr.offset { + background-color: #f0f0f0; +} +.swagger-section .swagger-ui-wrap table tbody tr td { + padding: 6px; + font-size: 0.9em; + border-bottom: 1px solid #cccccc; + vertical-align: top; + line-height: 1.3em; +} +.swagger-section .swagger-ui-wrap ol { + margin: 0px 0 10px; + padding: 0 0 0 18px; + list-style-type: decimal; +} +.swagger-section .swagger-ui-wrap ol li { + padding: 5px 0px; + font-size: 0.9em; + color: #333333; +} +.swagger-section .swagger-ui-wrap ol, +.swagger-section .swagger-ui-wrap ul { + list-style: none; +} +.swagger-section .swagger-ui-wrap h1 a, +.swagger-section .swagger-ui-wrap h2 a, +.swagger-section .swagger-ui-wrap h3 a, +.swagger-section .swagger-ui-wrap h4 a, +.swagger-section .swagger-ui-wrap h5 a, +.swagger-section .swagger-ui-wrap h6 a { + text-decoration: none; +} +.swagger-section .swagger-ui-wrap h1 a:hover, +.swagger-section .swagger-ui-wrap h2 a:hover, +.swagger-section .swagger-ui-wrap h3 a:hover, +.swagger-section .swagger-ui-wrap h4 a:hover, +.swagger-section .swagger-ui-wrap h5 a:hover, +.swagger-section .swagger-ui-wrap h6 a:hover { + text-decoration: underline; +} +.swagger-section .swagger-ui-wrap h1 span.divider, +.swagger-section .swagger-ui-wrap h2 span.divider, +.swagger-section .swagger-ui-wrap h3 span.divider, +.swagger-section .swagger-ui-wrap h4 span.divider, +.swagger-section .swagger-ui-wrap h5 span.divider, +.swagger-section .swagger-ui-wrap h6 span.divider { + color: #aaaaaa; +} +.swagger-section .swagger-ui-wrap a { + color: #547f00; +} +.swagger-section .swagger-ui-wrap a img { + border: none; +} +.swagger-section .swagger-ui-wrap article, +.swagger-section .swagger-ui-wrap aside, +.swagger-section .swagger-ui-wrap details, +.swagger-section .swagger-ui-wrap figcaption, +.swagger-section .swagger-ui-wrap figure, +.swagger-section .swagger-ui-wrap footer, +.swagger-section .swagger-ui-wrap header, +.swagger-section .swagger-ui-wrap hgroup, +.swagger-section .swagger-ui-wrap menu, +.swagger-section .swagger-ui-wrap nav, +.swagger-section .swagger-ui-wrap section, +.swagger-section .swagger-ui-wrap summary { + display: block; +} +.swagger-section .swagger-ui-wrap pre { + font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace; + background-color: #fcf6db; + border: 1px solid #e5e0c6; + padding: 10px; +} +.swagger-section .swagger-ui-wrap pre code { + line-height: 1.6em; + background: none; +} +.swagger-section .swagger-ui-wrap .content > .content-type > div > label { + clear: both; + display: block; + color: #0F6AB4; + font-size: 1.1em; + margin: 0; + padding: 15px 0 5px; +} +.swagger-section .swagger-ui-wrap .content pre { + font-size: 12px; + margin-top: 5px; + padding: 5px; +} +.swagger-section .swagger-ui-wrap .icon-btn { + cursor: pointer; +} +.swagger-section .swagger-ui-wrap .info_title { + padding-bottom: 10px; + font-weight: bold; + font-size: 25px; +} +.swagger-section .swagger-ui-wrap p.big, +.swagger-section .swagger-ui-wrap div.big p { + font-size: 1em; + margin-bottom: 10px; +} +.swagger-section .swagger-ui-wrap 
form.fullwidth ol li.string input, +.swagger-section .swagger-ui-wrap form.fullwidth ol li.url input, +.swagger-section .swagger-ui-wrap form.fullwidth ol li.text textarea, +.swagger-section .swagger-ui-wrap form.fullwidth ol li.numeric input { + width: 500px !important; +} +.swagger-section .swagger-ui-wrap .info_license { + padding-bottom: 5px; +} +.swagger-section .swagger-ui-wrap .info_tos { + padding-bottom: 5px; +} +.swagger-section .swagger-ui-wrap .message-fail { + color: #cc0000; +} +.swagger-section .swagger-ui-wrap .info_contact { + padding-bottom: 5px; +} +.swagger-section .swagger-ui-wrap .info_description { + padding-bottom: 10px; + font-size: 15px; +} +.swagger-section .swagger-ui-wrap .markdown ol li, +.swagger-section .swagger-ui-wrap .markdown ul li { + padding: 3px 0px; + line-height: 1.4em; + color: #333333; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.string input, +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.url input, +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.numeric input { + display: block; + padding: 4px; + width: auto; + clear: both; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.string input.title, +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.url input.title, +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.numeric input.title { + font-size: 1.3em; +} +.swagger-section .swagger-ui-wrap table.fullwidth { + width: 100%; +} +.swagger-section .swagger-ui-wrap .model-signature { + font-family: "Droid Sans", sans-serif; + font-size: 1em; + line-height: 1.5em; +} +.swagger-section .swagger-ui-wrap .model-signature .signature-nav a { + text-decoration: none; + color: #AAA; +} +.swagger-section .swagger-ui-wrap .model-signature .signature-nav a:hover { + text-decoration: underline; + color: black; +} +.swagger-section .swagger-ui-wrap .model-signature .signature-nav .selected { + color: black; + text-decoration: none; +} +.swagger-section .swagger-ui-wrap .model-signature .propType { + color: #5555aa; +} +.swagger-section .swagger-ui-wrap .model-signature pre:hover { + background-color: #ffffdd; +} +.swagger-section .swagger-ui-wrap .model-signature pre { + font-size: .85em; + line-height: 1.2em; + overflow: auto; + max-height: 200px; + cursor: pointer; +} +.swagger-section .swagger-ui-wrap .model-signature ul.signature-nav { + display: block; + margin: 0; + padding: 0; +} +.swagger-section .swagger-ui-wrap .model-signature ul.signature-nav li:last-child { + padding-right: 0; + border-right: none; +} +.swagger-section .swagger-ui-wrap .model-signature ul.signature-nav li { + float: left; + margin: 0 5px 5px 0; + padding: 2px 5px 2px 0; + border-right: 1px solid #ddd; +} +.swagger-section .swagger-ui-wrap .model-signature .propOpt { + color: #555; +} +.swagger-section .swagger-ui-wrap .model-signature .snippet small { + font-size: 0.75em; +} +.swagger-section .swagger-ui-wrap .model-signature .propOptKey { + font-style: italic; +} +.swagger-section .swagger-ui-wrap .model-signature .description .strong { + font-weight: bold; + color: #000; + font-size: .9em; +} +.swagger-section .swagger-ui-wrap .model-signature .description div { + font-size: 0.9em; + line-height: 1.5em; + margin-left: 1em; +} +.swagger-section .swagger-ui-wrap .model-signature .description .stronger { + font-weight: bold; + color: #000; +} +.swagger-section .swagger-ui-wrap .model-signature .propName { + font-weight: bold; +} 
+.swagger-section .swagger-ui-wrap .model-signature .signature-container { + clear: both; +} +.swagger-section .swagger-ui-wrap .body-textarea { + width: 300px; + height: 100px; + border: 1px solid #aaa; +} +.swagger-section .swagger-ui-wrap .markdown p code, +.swagger-section .swagger-ui-wrap .markdown li code { + font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace; + background-color: #f0f0f0; + color: black; + padding: 1px 3px; +} +.swagger-section .swagger-ui-wrap .required { + font-weight: bold; +} +.swagger-section .swagger-ui-wrap input.parameter { + width: 300px; + border: 1px solid #aaa; +} +.swagger-section .swagger-ui-wrap h1 { + color: black; + font-size: 1.5em; + line-height: 1.3em; + padding: 10px 0 10px 0; + font-family: "Droid Sans", sans-serif; + font-weight: bold; +} +.swagger-section .swagger-ui-wrap .heading_with_menu { + float: none; + clear: both; + overflow: hidden; + display: block; +} +.swagger-section .swagger-ui-wrap .heading_with_menu ul { + display: block; + clear: none; + float: right; + -moz-box-sizing: border-box; + -webkit-box-sizing: border-box; + -ms-box-sizing: border-box; + box-sizing: border-box; + margin-top: 10px; +} +.swagger-section .swagger-ui-wrap h2 { + color: black; + font-size: 1.3em; + padding: 10px 0 10px 0; +} +.swagger-section .swagger-ui-wrap h2 a { + color: black; +} +.swagger-section .swagger-ui-wrap h2 span.sub { + font-size: 0.7em; + color: #999999; + font-style: italic; +} +.swagger-section .swagger-ui-wrap h2 span.sub a { + color: #777777; +} +.swagger-section .swagger-ui-wrap span.weak { + color: #666666; +} +.swagger-section .swagger-ui-wrap .message-success { + color: #89BF04; +} +.swagger-section .swagger-ui-wrap caption, +.swagger-section .swagger-ui-wrap th, +.swagger-section .swagger-ui-wrap td { + text-align: left; + font-weight: normal; + vertical-align: middle; +} +.swagger-section .swagger-ui-wrap .code { + font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.text textarea { + font-family: "Droid Sans", sans-serif; + height: 250px; + padding: 4px; + display: block; + clear: both; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.select select { + display: block; + clear: both; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.boolean { + float: none; + clear: both; + overflow: hidden; + display: block; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.boolean label { + display: block; + float: left; + clear: none; + margin: 0; + padding: 0; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.boolean input { + display: block; + float: left; + clear: none; + margin: 0 5px 0 0; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li.required label { + color: black; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li label { + display: block; + clear: both; + width: auto; + padding: 0 0 3px; + color: #666666; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li label abbr { + padding-left: 3px; + color: #888888; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.inputs ol li p.inline-hints { + margin-left: 0; + font-style: italic; + font-size: 0.9em; + margin: 0; +} +.swagger-section .swagger-ui-wrap form.formtastic fieldset.buttons { + margin: 0; + padding: 0; +} 
+.swagger-section .swagger-ui-wrap span.blank, +.swagger-section .swagger-ui-wrap span.empty { + color: #888888; + font-style: italic; +} +.swagger-section .swagger-ui-wrap .markdown h3 { + color: #547f00; +} +.swagger-section .swagger-ui-wrap .markdown h4 { + color: #666666; +} +.swagger-section .swagger-ui-wrap .markdown pre { + font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace; + background-color: #fcf6db; + border: 1px solid #e5e0c6; + padding: 10px; + margin: 0 0 10px 0; +} +.swagger-section .swagger-ui-wrap .markdown pre code { + line-height: 1.6em; +} +.swagger-section .swagger-ui-wrap div.gist { + margin: 20px 0 25px 0 !important; +} +.swagger-section .swagger-ui-wrap ul#resources { + font-family: "Droid Sans", sans-serif; + font-size: 0.9em; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource { + border-bottom: 1px solid #dddddd; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource:hover div.heading h2 a, +.swagger-section .swagger-ui-wrap ul#resources li.resource.active div.heading h2 a { + color: black; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource:hover div.heading ul.options li a, +.swagger-section .swagger-ui-wrap ul#resources li.resource.active div.heading ul.options li a { + color: #555555; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource:last-child { + border-bottom: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading { + border: 1px solid transparent; + float: none; + clear: both; + overflow: hidden; + display: block; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options { + overflow: hidden; + padding: 0; + display: block; + clear: none; + float: right; + margin: 14px 10px 0 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li { + float: left; + clear: none; + margin: 0; + padding: 2px 10px; + border-right: 1px solid #dddddd; + color: #666666; + font-size: 0.9em; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li a { + color: #aaaaaa; + text-decoration: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li a:hover { + text-decoration: underline; + color: black; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li a:hover, +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li a:active, +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li a.active { + text-decoration: underline; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li:first-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li.first { + padding-left: 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options li.last { + padding-right: 0; + border-right: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options:first-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading ul.options.first { + padding-left: 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading h2 { + color: #999999; + padding-left: 0; + display: block; + clear: none; + float: left; + font-family: "Droid Sans", sans-serif; + font-weight: bold; +} +.swagger-section .swagger-ui-wrap 
ul#resources li.resource div.heading h2 a { + color: #999999; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource div.heading h2 a:hover { + color: black; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation { + float: none; + clear: both; + overflow: hidden; + display: block; + margin: 0 0 10px; + padding: 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading { + float: none; + clear: both; + overflow: hidden; + display: block; + margin: 0; + padding: 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 { + display: block; + clear: none; + float: left; + width: auto; + margin: 0; + padding: 0; + line-height: 1.1em; + color: black; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.path { + padding-left: 10px; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.path a { + color: black; + text-decoration: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.path a:hover { + text-decoration: underline; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.http_method a { + text-transform: uppercase; + text-decoration: none; + color: white; + display: inline-block; + width: 50px; + font-size: 0.7em; + text-align: center; + padding: 7px 0 4px; + -moz-border-radius: 2px; + -webkit-border-radius: 2px; + -o-border-radius: 2px; + -ms-border-radius: 2px; + -khtml-border-radius: 2px; + border-radius: 2px; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span { + margin: 0; + padding: 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options { + overflow: hidden; + padding: 0; + display: block; + clear: none; + float: right; + margin: 6px 10px 0 0; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options li { + float: left; + clear: none; + margin: 0; + padding: 2px 10px; + font-size: 0.9em; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options li a { + text-decoration: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options li.access { + color: black; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content { + border-top: none; + padding: 10px; + -moz-border-radius-bottomleft: 6px; + -webkit-border-bottom-left-radius: 6px; + -o-border-bottom-left-radius: 6px; + -ms-border-bottom-left-radius: 6px; + -khtml-border-bottom-left-radius: 6px; + border-bottom-left-radius: 6px; + -moz-border-radius-bottomright: 6px; + -webkit-border-bottom-right-radius: 6px; + -o-border-bottom-right-radius: 6px; + -ms-border-bottom-right-radius: 6px; + -khtml-border-bottom-right-radius: 6px; + border-bottom-right-radius: 6px; + margin: 0 0 20px; +} +.swagger-section .swagger-ui-wrap 
ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content h4 { + font-size: 1.1em; + margin: 0; + padding: 15px 0 5px; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header { + float: none; + clear: both; + overflow: hidden; + display: block; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header a { + padding: 4px 0 0 10px; + display: inline-block; + font-size: 0.9em; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header img { + display: block; + clear: none; + float: right; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header input.submit { + display: block; + clear: none; + float: left; + padding: 6px 8px; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content form input[type='text'].error { + outline: 2px solid black; + outline-color: #cc0000; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.response div.block pre { + font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace; + padding: 10px; + font-size: 0.9em; + max-height: 400px; + overflow-y: auto; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading { + background-color: #f9f2e9; + border: 1px solid #f0e0ca; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading h3 span.http_method a { + background-color: #c5862b; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #f0e0ca; + color: #c5862b; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li a { + color: #c5862b; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content { + background-color: #faf5ee; + border: 1px solid #f0e0ca; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content h4 { + color: #c5862b; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content div.sandbox_header a { + color: #dcb67f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading { + background-color: #fcffcd; + border: 1px solid black; + border-color: #ffd20f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading h3 span.http_method a { + text-transform: uppercase; + background-color: #ffd20f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #ffd20f; + color: #ffd20f; +} +.swagger-section 
.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li a { + color: #ffd20f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content { + background-color: #fcffcd; + border: 1px solid black; + border-color: #ffd20f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content h4 { + color: #ffd20f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content div.sandbox_header a { + color: #6fc992; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading { + background-color: #f5e8e8; + border: 1px solid #e8c6c7; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading h3 span.http_method a { + text-transform: uppercase; + background-color: #a41e22; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #e8c6c7; + color: #a41e22; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li a { + color: #a41e22; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content { + background-color: #f7eded; + border: 1px solid #e8c6c7; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content h4 { + color: #a41e22; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content div.sandbox_header a { + color: #c8787a; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading { + background-color: #e7f6ec; + border: 1px solid #c3e8d1; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading h3 span.http_method a { + background-color: #10a54a; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #c3e8d1; + color: #10a54a; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li a { + color: #10a54a; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content { + background-color: #ebf7f0; + border: 1px solid #c3e8d1; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content h4 { + color: #10a54a; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content div.sandbox_header a { + color: #6fc992; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading { + background-color: #FCE9E3; + border: 1px solid #F5D5C3; +} +.swagger-section 
.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading h3 span.http_method a { + background-color: #D38042; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #f0cecb; + color: #D38042; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li a { + color: #D38042; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content { + background-color: #faf0ef; + border: 1px solid #f0cecb; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content h4 { + color: #D38042; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content div.sandbox_header a { + color: #dcb67f; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading { + background-color: #e7f0f7; + border: 1px solid #c3d9ec; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading h3 span.http_method a { + background-color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #c3d9ec; + color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li a { + color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content { + background-color: #ebf3f9; + border: 1px solid #c3d9ec; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content h4 { + color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content div.sandbox_header a { + color: #6fa5d2; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading { + background-color: #e7f0f7; + border: 1px solid #c3d9ec; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading h3 span.http_method a { + background-color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading ul.options li { + border-right: 1px solid #dddddd; + border-right-color: #c3d9ec; + color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading ul.options li a { + color: #0f6ab4; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.content { + background-color: #ebf3f9; + border: 1px solid #c3d9ec; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.content h4 { + color: #0f6ab4; +} +.swagger-section 
.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.content div.sandbox_header a { + color: #6fa5d2; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content { + border-top: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li:last-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li.last, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li.last, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li.last, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li.last, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li.last, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li.last { + padding-right: 0; + border-right: none; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li a:hover, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li a:active, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li a.active { + text-decoration: underline; +} +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li:first-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li.first { + padding-left: 0; +} +.swagger-section .swagger-ui-wrap ul#resources 
li.resource ul.endpoints li.endpoint ul.operations:first-child, +.swagger-section .swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations.first { + padding-left: 0; +} +.swagger-section .swagger-ui-wrap p#colophon { + margin: 0 15px 40px 15px; + padding: 10px 0; + font-size: 0.8em; + border-top: 1px solid #dddddd; + font-family: "Droid Sans", sans-serif; + color: #999999; + font-style: italic; +} +.swagger-section .swagger-ui-wrap p#colophon a { + text-decoration: none; + color: #547f00; +} +.swagger-section .swagger-ui-wrap h3 { + color: black; + font-size: 1.1em; + padding: 10px 0 10px 0; +} +.swagger-section .swagger-ui-wrap .markdown ol, +.swagger-section .swagger-ui-wrap .markdown ul { + font-family: "Droid Sans", sans-serif; + margin: 5px 0 10px; + padding: 0 0 0 18px; + list-style-type: disc; +} +.swagger-section .swagger-ui-wrap form.form_box { + background-color: #ebf3f9; + border: 1px solid #c3d9ec; + padding: 10px; +} +.swagger-section .swagger-ui-wrap form.form_box label { + color: #0f6ab4 !important; +} +.swagger-section .swagger-ui-wrap form.form_box input[type=submit] { + display: block; + padding: 10px; +} +.swagger-section .swagger-ui-wrap form.form_box p.weak { + font-size: 0.8em; +} +.swagger-section .swagger-ui-wrap form.form_box p { + font-size: 0.9em; + padding: 0 0 15px; + color: #7e7b6d; +} +.swagger-section .swagger-ui-wrap form.form_box p a { + color: #646257; +} +.swagger-section .swagger-ui-wrap form.form_box p strong { + color: black; +} +.swagger-section .title { + font-style: bold; +} +.swagger-section .secondary_form { + display: none; +} +.swagger-section .main_image { + display: block; + margin-left: auto; + margin-right: auto; +} +.swagger-section .oauth_body { + margin-left: 100px; + margin-right: 100px; +} +.swagger-section .oauth_submit { + text-align: center; +} +.swagger-section .api-popup-dialog { + z-index: 10000; + position: absolute; + width: 500px; + background: #FFF; + padding: 20px; + border: 1px solid #ccc; + border-radius: 5px; + display: none; + font-size: 13px; + color: #777; +} +.swagger-section .api-popup-dialog .api-popup-title { + font-size: 24px; + padding: 10px 0; +} +.swagger-section .api-popup-dialog .api-popup-title { + font-size: 24px; + padding: 10px 0; +} +.swagger-section .api-popup-dialog p.error-msg { + padding-left: 5px; + padding-bottom: 5px; +} +.swagger-section .api-popup-dialog button.api-popup-authbtn { + height: 30px; +} +.swagger-section .api-popup-dialog button.api-popup-cancel { + height: 30px; +} +.swagger-section .api-popup-scopes { + padding: 10px 20px; +} +.swagger-section .api-popup-scopes li { + padding: 5px 0; + line-height: 20px; +} +.swagger-section .api-popup-scopes .api-scope-desc { + padding-left: 20px; + font-style: italic; +} +.swagger-section .api-popup-scopes li input { + position: relative; + top: 2px; +} +.swagger-section .api-popup-actions { + padding-top: 10px; +} +.swagger-section .access { + float: right; +} +.swagger-section .auth { + float: right; +} +.swagger-section #api_information_panel { + position: absolute; + background: #FFF; + border: 1px solid #ccc; + border-radius: 5px; + display: none; + font-size: 13px; + max-width: 300px; + line-height: 30px; + color: black; + padding: 5px; +} +.swagger-section #api_information_panel p .api-msg-enabled { + color: green; +} +.swagger-section #api_information_panel p .api-msg-disabled { + color: red; +} +.swagger-section .api-ic { + height: 18px; + vertical-align: middle; + display: inline-block; + background: 
url(../images/explorer_icons.png) no-repeat; +} +.swagger-section .ic-info { + background-position: 0 0; + width: 18px; + margin-top: -7px; + margin-left: 4px; +} +.swagger-section .ic-warning { + background-position: -60px 0; + width: 18px; + margin-top: -7px; + margin-left: 4px; +} +.swagger-section .ic-error { + background-position: -30px 0; + width: 18px; + margin-top: -7px; + margin-left: 4px; +} +.swagger-section .ic-off { + background-position: -90px 0; + width: 58px; + margin-top: -4px; + cursor: pointer; +} +.swagger-section .ic-on { + background-position: -160px 0; + width: 58px; + margin-top: -4px; + cursor: pointer; +} +.swagger-section #header { + background-color: #000000; + padding: 14px; +} +.swagger-section #header a#logo { + font-size: 1.5em; + font-weight: bold; + text-decoration: none; + background: transparent url(../../../img/netflix_logo_v7.png) no-repeat left center; + padding: 20px 0 20px 150px; + color: white; +} +.swagger-section #header form#api_selector { + display: block; + clear: none; + float: right; +} +.swagger-section #header form#api_selector .input { + display: block; + clear: none; + float: left; + margin: 0 10px 0 0; +} +.swagger-section #header form#api_selector .input input#input_apiKey { + width: 200px; +} +.swagger-section #header form#api_selector .input input#input_baseUrl { + width: 400px; +} +.swagger-section #header form#api_selector .input a#explore { + display: block; + text-decoration: none; + font-weight: bold; + padding: 6px 8px; + font-size: 0.9em; + color: white; + background-color: #547f00; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + -o-border-radius: 4px; + -ms-border-radius: 4px; + -khtml-border-radius: 4px; + border-radius: 4px; +} +.swagger-section #header form#api_selector .input a#explore:hover { + background-color: #547f00; +} +.swagger-section #header form#api_selector .input input { + font-size: 0.9em; + padding: 3px; + margin: 0; +} +.swagger-section #content_message { + margin: 10px 15px; + font-style: italic; + color: #999999; +} +.swagger-section #message-bar { + min-height: 30px; + text-align: center; + padding-top: 10px; +} diff --git a/metacat-server/src/main/webapp/docs/api/images/explorer_icons.png b/metacat-server/src/main/webapp/docs/api/images/explorer_icons.png new file mode 100644 index 000000000..ed9d2fffb Binary files /dev/null and b/metacat-server/src/main/webapp/docs/api/images/explorer_icons.png differ diff --git a/metacat-server/src/main/webapp/docs/api/images/logo_small.png b/metacat-server/src/main/webapp/docs/api/images/logo_small.png new file mode 100644 index 000000000..5496a6557 Binary files /dev/null and b/metacat-server/src/main/webapp/docs/api/images/logo_small.png differ diff --git a/metacat-server/src/main/webapp/docs/api/images/pet_store_api.png b/metacat-server/src/main/webapp/docs/api/images/pet_store_api.png new file mode 100644 index 000000000..f9f9cd4ae Binary files /dev/null and b/metacat-server/src/main/webapp/docs/api/images/pet_store_api.png differ diff --git a/metacat-server/src/main/webapp/docs/api/images/throbber.gif b/metacat-server/src/main/webapp/docs/api/images/throbber.gif new file mode 100644 index 000000000..063938892 Binary files /dev/null and b/metacat-server/src/main/webapp/docs/api/images/throbber.gif differ diff --git a/metacat-server/src/main/webapp/docs/api/images/wordnik_api.png b/metacat-server/src/main/webapp/docs/api/images/wordnik_api.png new file mode 100644 index 000000000..dca4f1455 Binary files /dev/null and 
b/metacat-server/src/main/webapp/docs/api/images/wordnik_api.png differ
diff --git a/metacat-server/src/main/webapp/docs/api/index.html b/metacat-server/src/main/webapp/docs/api/index.html
new file mode 100644
index 000000000..afbc6e6c5
--- /dev/null
+++ b/metacat-server/src/main/webapp/docs/api/index.html
@@ -0,0 +1,78 @@
+[index.html markup lost in extraction; only the page title "Metacat API Documentation" is recoverable. The 78-line file is apparently the stock swagger-ui index page wired to the Metacat API docs.]
+ + diff --git a/metacat-server/src/main/webapp/docs/api/lib/backbone-min.js b/metacat-server/src/main/webapp/docs/api/lib/backbone-min.js new file mode 100644 index 000000000..c1c0d4fff --- /dev/null +++ b/metacat-server/src/main/webapp/docs/api/lib/backbone-min.js @@ -0,0 +1,38 @@ +// Backbone.js 0.9.2 + +// (c) 2010-2012 Jeremy Ashkenas, DocumentCloud Inc. +// Backbone may be freely distributed under the MIT license. +// For all details and documentation: +// http://backbonejs.org +(function(){var l=this,y=l.Backbone,z=Array.prototype.slice,A=Array.prototype.splice,g;g="undefined"!==typeof exports?exports:l.Backbone={};g.VERSION="0.9.2";var f=l._;!f&&"undefined"!==typeof require&&(f=require("underscore"));var i=l.jQuery||l.Zepto||l.ender;g.setDomLibrary=function(a){i=a};g.noConflict=function(){l.Backbone=y;return this};g.emulateHTTP=!1;g.emulateJSON=!1;var p=/\s+/,k=g.Events={on:function(a,b,c){var d,e,f,g,j;if(!b)return this;a=a.split(p);for(d=this._callbacks||(this._callbacks= +{});e=a.shift();)f=(j=d[e])?j.tail:{},f.next=g={},f.context=c,f.callback=b,d[e]={tail:g,next:j?j.next:f};return this},off:function(a,b,c){var d,e,h,g,j,q;if(e=this._callbacks){if(!a&&!b&&!c)return delete this._callbacks,this;for(a=a?a.split(p):f.keys(e);d=a.shift();)if(h=e[d],delete e[d],h&&(b||c))for(g=h.tail;(h=h.next)!==g;)if(j=h.callback,q=h.context,b&&j!==b||c&&q!==c)this.on(d,j,q);return this}},trigger:function(a){var b,c,d,e,f,g;if(!(d=this._callbacks))return this;f=d.all;a=a.split(p);for(g= +z.call(arguments,1);b=a.shift();){if(c=d[b])for(e=c.tail;(c=c.next)!==e;)c.callback.apply(c.context||this,g);if(c=f){e=c.tail;for(b=[b].concat(g);(c=c.next)!==e;)c.callback.apply(c.context||this,b)}}return this}};k.bind=k.on;k.unbind=k.off;var o=g.Model=function(a,b){var c;a||(a={});b&&b.parse&&(a=this.parse(a));if(c=n(this,"defaults"))a=f.extend({},c,a);b&&b.collection&&(this.collection=b.collection);this.attributes={};this._escapedAttributes={};this.cid=f.uniqueId("c");this.changed={};this._silent= +{};this._pending={};this.set(a,{silent:!0});this.changed={};this._silent={};this._pending={};this._previousAttributes=f.clone(this.attributes);this.initialize.apply(this,arguments)};f.extend(o.prototype,k,{changed:null,_silent:null,_pending:null,idAttribute:"id",initialize:function(){},toJSON:function(){return f.clone(this.attributes)},get:function(a){return this.attributes[a]},escape:function(a){var b;if(b=this._escapedAttributes[a])return b;b=this.get(a);return this._escapedAttributes[a]=f.escape(null== +b?"":""+b)},has:function(a){return null!=this.get(a)},set:function(a,b,c){var d,e;f.isObject(a)||null==a?(d=a,c=b):(d={},d[a]=b);c||(c={});if(!d)return this;d instanceof o&&(d=d.attributes);if(c.unset)for(e in d)d[e]=void 0;if(!this._validate(d,c))return!1;this.idAttribute in d&&(this.id=d[this.idAttribute]);var b=c.changes={},h=this.attributes,g=this._escapedAttributes,j=this._previousAttributes||{};for(e in d){a=d[e];if(!f.isEqual(h[e],a)||c.unset&&f.has(h,e))delete g[e],(c.silent?this._silent: +b)[e]=!0;c.unset?delete h[e]:h[e]=a;!f.isEqual(j[e],a)||f.has(h,e)!=f.has(j,e)?(this.changed[e]=a,c.silent||(this._pending[e]=!0)):(delete this.changed[e],delete this._pending[e])}c.silent||this.change(c);return this},unset:function(a,b){(b||(b={})).unset=!0;return this.set(a,null,b)},clear:function(a){(a||(a={})).unset=!0;return this.set(f.clone(this.attributes),a)},fetch:function(a){var a=a?f.clone(a):{},b=this,c=a.success;a.success=function(d,e,f){if(!b.set(b.parse(d,f),a))return!1;c&&c(b,d)}; 
+a.error=g.wrapError(a.error,b,a);return(this.sync||g.sync).call(this,"read",this,a)},save:function(a,b,c){var d,e;f.isObject(a)||null==a?(d=a,c=b):(d={},d[a]=b);c=c?f.clone(c):{};if(c.wait){if(!this._validate(d,c))return!1;e=f.clone(this.attributes)}a=f.extend({},c,{silent:!0});if(d&&!this.set(d,c.wait?a:c))return!1;var h=this,i=c.success;c.success=function(a,b,e){b=h.parse(a,e);if(c.wait){delete c.wait;b=f.extend(d||{},b)}if(!h.set(b,c))return false;i?i(h,a):h.trigger("sync",h,a,c)};c.error=g.wrapError(c.error, +h,c);b=this.isNew()?"create":"update";b=(this.sync||g.sync).call(this,b,this,c);c.wait&&this.set(e,a);return b},destroy:function(a){var a=a?f.clone(a):{},b=this,c=a.success,d=function(){b.trigger("destroy",b,b.collection,a)};if(this.isNew())return d(),!1;a.success=function(e){a.wait&&d();c?c(b,e):b.trigger("sync",b,e,a)};a.error=g.wrapError(a.error,b,a);var e=(this.sync||g.sync).call(this,"delete",this,a);a.wait||d();return e},url:function(){var a=n(this,"urlRoot")||n(this.collection,"url")||t(); +return this.isNew()?a:a+("/"==a.charAt(a.length-1)?"":"/")+encodeURIComponent(this.id)},parse:function(a){return a},clone:function(){return new this.constructor(this.attributes)},isNew:function(){return null==this.id},change:function(a){a||(a={});var b=this._changing;this._changing=!0;for(var c in this._silent)this._pending[c]=!0;var d=f.extend({},a.changes,this._silent);this._silent={};for(c in d)this.trigger("change:"+c,this,this.get(c),a);if(b)return this;for(;!f.isEmpty(this._pending);){this._pending= +{};this.trigger("change",this,a);for(c in this.changed)!this._pending[c]&&!this._silent[c]&&delete this.changed[c];this._previousAttributes=f.clone(this.attributes)}this._changing=!1;return this},hasChanged:function(a){return!arguments.length?!f.isEmpty(this.changed):f.has(this.changed,a)},changedAttributes:function(a){if(!a)return this.hasChanged()?f.clone(this.changed):!1;var b,c=!1,d=this._previousAttributes,e;for(e in a)if(!f.isEqual(d[e],b=a[e]))(c||(c={}))[e]=b;return c},previous:function(a){return!arguments.length|| +!this._previousAttributes?null:this._previousAttributes[a]},previousAttributes:function(){return f.clone(this._previousAttributes)},isValid:function(){return!this.validate(this.attributes)},_validate:function(a,b){if(b.silent||!this.validate)return!0;var a=f.extend({},this.attributes,a),c=this.validate(a,b);if(!c)return!0;b&&b.error?b.error(this,c,b):this.trigger("error",this,c,b);return!1}});var r=g.Collection=function(a,b){b||(b={});b.model&&(this.model=b.model);b.comparator&&(this.comparator=b.comparator); +this._reset();this.initialize.apply(this,arguments);a&&this.reset(a,{silent:!0,parse:b.parse})};f.extend(r.prototype,k,{model:o,initialize:function(){},toJSON:function(a){return this.map(function(b){return b.toJSON(a)})},add:function(a,b){var c,d,e,g,i,j={},k={},l=[];b||(b={});a=f.isArray(a)?a.slice():[a];c=0;for(d=a.length;c=b))this.iframe=i('