与cloudera hbase 1.0.0集成时的java依赖冲突

pb3skfrl  于 2021-06-09  发布在  Hbase
关注(0)|答案(2)|浏览(404)

我尝试将我的play framework(2.4.2)web应用程序连接到cloudera hbase集群。我在bulid.sbt文件中包含了hbase依赖项,并使用hbase示例代码将单元格插入表中。然而,我得到了一个例外,它似乎是play框架和hbase之间的依赖冲突。我还附上了我的示例代码和build.sbt文件。如果您能帮助我解决这个错误,我将不胜感激。

[ERROR] [07/21/2015 12:03:05.919] [application-akka.actor.default-dispatcher-5] [ActorSystem(application)] Uncaught fatal error from thread [application-akka.actor.default-dispatcher-5] shutting down ActorSystem [application]
    java.lang.IllegalAccessError: tried to access method com.google.common.base.Stopwatch.<init>()V from class org.apache.hadoop.hbase.zookeeper.MetaTableLocator
        at org.apache.hadoop.hbase.zookeeper.MetaTableLocator.blockUntilAvailable(MetaTableLocator.java:434)
        at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getMetaRegionLocation(ZooKeeperRegistry.java:60)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1123)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1110)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegionInMeta(ConnectionManager.java:1262)
        at org.apache.hadoop.hbase.client.ConnectionManager$HConnectionImplementation.locateRegion(ConnectionManager.java:1126)
        at org.apache.hadoop.hbase.client.AsyncProcess.submit(AsyncProcess.java:369)
        at org.apache.hadoop.hbase.client.AsyncProcess.submit(AsyncProcess.java:320)
        at org.apache.hadoop.hbase.client.BufferedMutatorImpl.backgroundFlushCommits(BufferedMutatorImpl.java:206)
        at org.apache.hadoop.hbase.client.BufferedMutatorImpl.flush(BufferedMutatorImpl.java:183)
        at org.apache.hadoop.hbase.client.HTable.flushCommits(HTable.java:1496)
        at org.apache.hadoop.hbase.client.HTable.put(HTable.java:1107)
        at controllers.Application.index(Application.java:44)
        at router.Routes$$anonfun$routes$1$$anonfun$applyOrElse$1$$anonfun$apply$1.apply(Routes.scala:95)
        at router.Routes$$anonfun$routes$1$$anonfun$applyOrElse$1$$anonfun$apply$1.apply(Routes.scala:95)
        at play.core.routing.HandlerInvokerFactory$$anon$4.resultCall(HandlerInvoker.scala:136)
        at play.core.routing.HandlerInvokerFactory$JavaActionInvokerFactory$$anon$14$$anon$3$$anon$1.invocation(HandlerInvoker.scala:127)
        at play.core.j.JavaAction$$anon$1.call(JavaAction.scala:70)
        at play.http.DefaultHttpRequestHandler$1.call(DefaultHttpRequestHandler.java:20)
        at play.core.j.JavaAction$$anonfun$7.apply(JavaAction.scala:94)
        at play.core.j.JavaAction$$anonfun$7.apply(JavaAction.scala:94)
        at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
        at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
        at play.core.j.HttpExecutionContext$$anon$2.run(HttpExecutionContext.scala:40)
        at play.api.libs.iteratee.Execution$trampoline$.execute(Execution.scala:70)
        at play.core.j.HttpExecutionContext.execute(HttpExecutionContext.scala:32)
        at scala.concurrent.impl.Future$.apply(Future.scala:31)
        at scala.concurrent.Future$.apply(Future.scala:492)
        at play.core.j.JavaAction.apply(JavaAction.scala:94)
        at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4$$anonfun$apply$5.apply(Action.scala:105)
        at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4$$anonfun$apply$5.apply(Action.scala:105)
        at play.utils.Threads$.withContextClassLoader(Threads.scala:21)
        at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4.apply(Action.scala:104)
        at play.api.mvc.Action$$anonfun$apply$1$$anonfun$apply$4.apply(Action.scala:103)
        at scala.Option.map(Option.scala:146)
        at play.api.mvc.Action$$anonfun$apply$1.apply(Action.scala:103)
        at play.api.mvc.Action$$anonfun$apply$1.apply(Action.scala:96)
        at play.api.libs.iteratee.Iteratee$$anonfun$mapM$1.apply(Iteratee.scala:524)
        at play.api.libs.iteratee.Iteratee$$anonfun$mapM$1.apply(Iteratee.scala:524)
        at play.api.libs.iteratee.Iteratee$$anonfun$flatMapM$1.apply(Iteratee.scala:560)
        at play.api.libs.iteratee.Iteratee$$anonfun$flatMapM$1.apply(Iteratee.scala:560)
        at play.api.libs.iteratee.Iteratee$$anonfun$flatMap$1$$anonfun$apply$13.apply(Iteratee.scala:536)
        at play.api.libs.iteratee.Iteratee$$anonfun$flatMap$1$$anonfun$apply$13.apply(Iteratee.scala:536)
        at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
        at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
        at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:40)
        at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397)
        at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
        at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
        at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
        at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)

这是我的bulid.sbt文件:

name := """HbaseTest"""

version := "1.0-SNAPSHOT"

lazy val root = (project in file(".")).enablePlugins(PlayJava)

scalaVersion := "2.11.6"

libraryDependencies ++= Seq(
  javaJdbc,
  cache,
  javaWs
)
//hbase
libraryDependencies +="org.apache.hbase" % "hbase-client" % "1.0.0-cdh5.4.4" 
libraryDependencies +="org.apache.hbase" % "hbase-annotations" % "1.0.0-cdh5.4.4" 
libraryDependencies +="org.apache.hbase" % "hbase-common" % "1.0.0-cdh5.4.4"
libraryDependencies +="org.apache.hbase" % "hbase-protocol" % "1.0.0-cdh5.4.4" 
//hadoop
libraryDependencies +="org.apache.hadoop" % "hadoop-common"%"2.6.0-cdh5.4.4" 
libraryDependencies +="org.apache.hadoop" % "hadoop-annotations"%"2.6.0-cdh5.4.4" 
libraryDependencies +="org.apache.hadoop" % "hadoop-auth"%"2.6.0-cdh5.4.4"
// Play provides two styles of routers, one expects its actions to be injected, the
// other, legacy style, accesses its actions statically.
routesGenerator := InjectedRoutesGenerator

这是我的密码:

package controllers;

import play.*;
import play.mvc.*;
import views.html.*;

import java.io.IOException;
import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.util.Bytes;
public class Application extends Controller {

    public Result index() {       
          String ZooKeeperIP = "10.12.7.43";
          String ZooKeeperPort = "2181";
          String HBaseMaster = "10.12.7.43:60000";
          Configuration hBaseConfig;
          Connection  connection = null;
          //TableName TABLE_NAME = "sample";
          hBaseConfig =  HBaseConfiguration.create();
            hBaseConfig.set("hbase.zookeeper.quorum",ZooKeeperIP);
            hBaseConfig.set("hbase.zookeeper.property.clientPort", ZooKeeperPort);
            hBaseConfig.set("hbase.master", HBaseMaster);

            //connection = ConnectionFactory.createConnection(hBaseConfig);

            try {
                connection = ConnectionFactory.createConnection(hBaseConfig);
                HTable table = new HTable(hBaseConfig, "sample");
                Put p = new Put(Bytes.toBytes("1"));
                p.add(Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("4"));
                table.put(p);
            }catch (Exception e) {
                e.printStackTrace();
                System.out.println(e.getMessage());         
            }
        return ok(index.render("Your new application is ready."));
    }

}
fsi0uk1n

fsi0uk1n1#

如我所见,问题在于依赖性。
明确地 guava 图书馆(这是 hadoop ). Play 使用更新版本的 guava 如我所见。它没有 StopWatch 哪个班 hbase 要求。
你可以用多种方法来解决这个问题(不幸的是,我知道的所有方法都是“黑客式的”)。
简单的方法是使用类似 zipkin . 在这里我们将添加 StopWatch 我们自己。
另一种方法是以某种方式分离hbase操作(这将需要大量的工作和设计变更)
如果你能 sbt 支持'阴影',因为我知道它还没有。
你仍然可以使用 sbt 像这样的努力 spark 处理类似的问题。

epggiuax

epggiuax2#

我也有类似的问题。我在一个项目中使用了spring、hadoop和hbase。我可以通过向pom.xml中显式添加guava lib来解决这个问题。版本必须小于17(我使用了16.0.1)
我有更多关于这个地方的信息:https://github.com/thinkaurelius/titan/issues/1236

相关问题