org.apache.hadoop.ipc.Server.start()方法的使用及代码示例

x33g5p2x  于2022-01-30 转载在 其他  
字(10.5k)|赞(0)|评价(0)|浏览(166)

本文整理了Java中org.apache.hadoop.ipc.Server.start()方法的一些代码示例,展示了Server.start()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Server.start()方法的具体详情如下:
包路径:org.apache.hadoop.ipc.Server
类名称:Server
方法名:start

Server.start介绍

[英]Starts the service. Must be called before any calls will be handled.
[中]启动服务。必须先调用此方法,之后才能处理任何调用。

代码示例

代码示例来源:origin: Qihoo360/XLearning

/**
 * Builds and starts the RPC server that handles calls from application
 * containers (bound to an ephemeral port on all interfaces), then launches
 * the daemon thread that monitors container timeouts.
 * <p>
 * On a build failure the error is logged and the method returns without
 * starting anything.
 */
@Override
public void start() {
 LOG.info("Starting application containers handler server");
 RPC.Builder builder = new RPC.Builder(getConfig());
 builder.setProtocol(ApplicationContainerProtocol.class);
 builder.setInstance(this);
 builder.setBindAddress("0.0.0.0");
 builder.setPort(0); // ephemeral port chosen by the OS
 try {
  server = builder.build();
 } catch (Exception e) {
  // LOG.error(msg, e) already records the full stack trace; the former
  // e.printStackTrace() only duplicated it on stderr and was removed.
  LOG.error("Error starting application containers handler server!", e);
  return;
 }
 server.start();
 // Daemon thread so it never blocks JVM shutdown.
 containerTimeoutMonitor = new Thread(new TimeoutMonitor());
 containerTimeoutMonitor.setName("Container-timeout-monitor");
 containerTimeoutMonitor.setDaemon(true);
 containerTimeoutMonitor.start();
 LOG.info("Container timeout monitor thread had started");
}

代码示例来源:origin: Qihoo360/XLearning

/**
 * Builds and starts the application message RPC server on an ephemeral
 * port, then records the server's actual connect address in
 * {@code serverAddress}.
 * <p>
 * On a build failure the error is logged and the method returns without
 * starting anything.
 */
@Override
public void start() {
 LOG.info("Starting application message server");
 RPC.Builder builder = new RPC.Builder(getConfig());
 builder.setProtocol(ApplicationMessageProtocol.class);
 builder.setInstance(this);
 builder.setBindAddress("0.0.0.0");
 builder.setPort(0); // ephemeral port chosen by the OS
 Server server;
 try {
  server = builder.build();
 } catch (Exception e) {
  // LOG.error(msg, e) already records the full stack trace; the former
  // e.printStackTrace() only duplicated it on stderr and was removed.
  LOG.error("Error starting message server!", e);
  return;
 }
 server.start();
 serverAddress = NetUtils.getConnectAddress(server);
 LOG.info("Started application message server at " + serverAddress);
}

代码示例来源:origin: Qihoo360/XLearning

/**
 * Starts the history-client RPC service: resolves the configured bind
 * address, builds the server, optionally enables service-level ACLs,
 * starts it, and records the actual post-bind connect address.
 *
 * @throws Exception if the server cannot be built or started
 */
protected void serviceStart() throws Exception {
 Configuration conf = new XLearningConfiguration();
 YarnRPC rpc = YarnRPC.create(conf);
 initializeWebApp(conf);
 // Resolve bind host/address/port from configuration with defaults.
 InetSocketAddress address = conf.getSocketAddr(
   XLearningConfiguration.XLEARNING_HISTORY_BIND_HOST,
   XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
   conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS, XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
   conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_PORT, XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_PORT));
 server =
   rpc.getServer(HSClientProtocol.class, protocolHandler, address,
     conf, jhsDTSecretManager,
     conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_CLIENT_THREAD_COUNT,
       XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_CLIENT_THREAD_COUNT));
 // Enable service authorization?
 if (conf.getBoolean(
   CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
   false)) {
  server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
 }
 server.start();
 // Record the real listener address (the configured port may have been 0).
 this.bindAddress = conf.updateConnectAddr(XLearningConfiguration.XLEARNING_HISTORY_BIND_HOST,
   XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
   conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS, XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
   server.getListenerAddress());
 LOG.info("Instantiated HistoryClientService at " + this.bindAddress);
 super.serviceStart();
}

代码示例来源:origin: apache/hive

public LlapTaskUmbilicalServer(Configuration conf, LlapTaskUmbilicalProtocol umbilical, int numHandlers) throws IOException {
 jobTokenSecretManager = new JobTokenSecretManager();
 server = new RPC.Builder(conf)
   .setProtocol(LlapTaskUmbilicalProtocol.class)
   .setBindAddress("0.0.0.0")
   .setPort(0)
   .setInstance(umbilical)
   .setNumHandlers(numHandlers)
   .setSecretManager(jobTokenSecretManager).build();
 if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
  server.refreshServiceAcl(conf, new LlapUmbilicalExternalPolicyProvider());
 }
 server.start();
 this.address = NetUtils.getConnectAddress(server);
 LOG.info(
   "Started TaskUmbilicalServer: " + umbilical.getClass().getName() + " at address: " + address +
   " with numHandlers=" + numHandlers);
}

代码示例来源:origin: apache/hive

/**
 * Builds and starts the LLAP umbilical RPC server: registers the session
 * token with a fresh JobTokenSecretManager, binds to an ephemeral port,
 * optionally refreshes service ACLs, and records both the connect address
 * and the derived AM host name. Any IOException is rethrown as an
 * unchecked TezUncheckedException.
 */
@Override
protected void startRpcServer() {
 Configuration conf = getConf();
 try {
  JobTokenSecretManager jobTokenSecretManager =
    new JobTokenSecretManager();
  jobTokenSecretManager.addTokenForJob(tokenIdentifier, sessionToken);
  int numHandlers =
    HiveConf.getIntVar(conf, ConfVars.LLAP_TASK_COMMUNICATOR_LISTENER_THREAD_COUNT);
  server = new RPC.Builder(conf)
    .setProtocol(LlapTaskUmbilicalProtocol.class)
    .setBindAddress("0.0.0.0")
    .setPort(0)
    .setInstance(umbilical)
    .setNumHandlers(numHandlers)
    .setSecretManager(jobTokenSecretManager).build();
  if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
   server.refreshServiceAcl(conf, new LlapUmbilicalPolicyProvider());
  }
  server.start();
  // Record the real connect address (port was 0) and the AM host name.
  this.address = NetUtils.getConnectAddress(server);
  this.amHost = LlapUtil.getAmHostNameFromAddress(address, conf);
  LOG.info("Started LlapUmbilical: " + umbilical.getClass().getName() + " at address: "
    + address + " with numHandlers=" + numHandlers + " using the host name " + amHost);
 } catch (IOException e) {
  // Callers do not expect a checked exception from startRpcServer.
  throw new TezUncheckedException(e);
 }
}

代码示例来源:origin: org.apache.slider/slider-core

/**
 * Starts the parent service first, then the wrapped RPC server.
 *
 * @throws Exception if either the parent service or the server fails to start
 */
@Override
protected void serviceStart() throws Exception {
 super.serviceStart();
 server.start();
}

代码示例来源:origin: apache/incubator-slider

/**
 * Starts the parent service first, then the wrapped RPC server.
 *
 * @throws Exception if either the parent service or the server fails to start
 */
@Override
protected void serviceStart() throws Exception {
 super.serviceStart();
 server.start();
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Creates and starts the datanode IPC server at the address configured
 * under {@code dfs.datanode.ipc.address}, with the handler-thread count
 * taken from {@code dfs.datanode.handler.count} (default 3).
 *
 * @param conf configuration providing the IPC address and handler count
 * @throws IOException if the server cannot be created or started
 */
private void initIpcServer(Configuration conf) throws IOException {
 InetSocketAddress addr =
   NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
 int handlerCount = conf.getInt("dfs.datanode.handler.count", 3);
 ipcServer = RPC.getServer(this, addr.getHostName(), addr.getPort(),
   handlerCount, false, conf);
 ipcServer.start();
}

代码示例来源:origin: org.apache.slider/slider-core

/**
 * Test hook: throws a RuntimeException when {@code failOnStart} is set to
 * simulate a startup failure; otherwise records that start was reached and
 * delegates to the superclass.
 */
@Override
public synchronized void start() {
 if (failOnStart) {
  throw new RuntimeException("failOnStart");
 }
 started = true;
 super.start();
}

代码示例来源:origin: apache/incubator-slider

/**
 * Test hook: throws a RuntimeException when {@code failOnStart} is set to
 * simulate a startup failure; otherwise records that start was reached and
 * delegates to the superclass.
 */
@Override
public synchronized void start() {
 if (failOnStart) {
  throw new RuntimeException("failOnStart");
 }
 started = true;
 super.start();
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/** Start a single datanode daemon and wait for it to finish.
 *  If this thread is specifically interrupted, it will stop waiting.
 */
public void runDatanodeDaemon() throws IOException {
 namespaceManager.startAll();
 // start dataXceiveServer
 dataXceiverServer.start();
 // Start the IPC server so clients can reach this datanode.
 ipcServer.start();
}

代码示例来源:origin: ch.cern.hadoop/hadoop-yarn-server-nodemanager

/**
 * Starts the localizer service: schedules the periodic cache-cleanup task,
 * creates and starts the RPC server, then records the actual post-bind
 * listener address in configuration before starting the parent service.
 *
 * @throws Exception if the server or parent service fails to start
 */
@Override
public void serviceStart() throws Exception {
 cacheCleanup.scheduleWithFixedDelay(new CacheCleanup(dispatcher),
   cacheCleanupPeriod, cacheCleanupPeriod, TimeUnit.MILLISECONDS);
 server = createServer();
 server.start();
 // Publish the real listener address (the configured port may have been 0).
 localizationServerAddress =
   getConfig().updateConnectAddr(YarnConfiguration.NM_BIND_HOST,
                  YarnConfiguration.NM_LOCALIZER_ADDRESS,
                  YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
                  server.getListenerAddress());
 LOG.info("Localizer started on port " + server.getPort());
 super.serviceStart();
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-yarn-server-nodemanager

/**
 * Starts the localizer service: schedules the periodic cache-cleanup task,
 * creates and starts the RPC server, then records the actual post-bind
 * listener address in configuration before starting the parent service.
 *
 * @throws Exception if the server or parent service fails to start
 */
@Override
public void serviceStart() throws Exception {
 cacheCleanup.scheduleWithFixedDelay(new CacheCleanup(dispatcher),
   cacheCleanupPeriod, cacheCleanupPeriod, TimeUnit.MILLISECONDS);
 server = createServer();
 server.start();
 // Publish the real listener address (the configured port may have been 0).
 localizationServerAddress =
   getConfig().updateConnectAddr(YarnConfiguration.NM_BIND_HOST,
                  YarnConfiguration.NM_LOCALIZER_ADDRESS,
                  YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
                  server.getListenerAddress());
 LOG.info("Localizer started on port " + server.getPort());
 super.serviceStart();
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/** Start RPC server */
MiniServer(Configuration conf, String user, String keytabFile)
throws IOException {
 UserGroupInformation.setConfiguration(conf);
 UserGroupInformation.loginUserFromKeytab(user, keytabFile);
 secretManager = 
  new TestDelegationTokenSecretManager(24*60*60*1000,
    7*24*60*60*1000,24*60*60*1000,3600000);
 secretManager.startThreads();
 rpcServer = RPC.getServer(MiniProtocol.class,
   this, DEFAULT_SERVER_ADDRESS, 0, 1, false, conf, secretManager);
 rpcServer.start();
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

@Test(timeout=30000, expected=IOException.class)
public void testIpcAfterStopping() throws IOException {
 // Bring up a server and make one successful call through a client,
 // then stop the client and verify a second call throws IOException.
 Server server = new TestServer(5, false);
 InetSocketAddress serverAddr = NetUtils.getConnectAddress(server);
 server.start();
 Client client = new Client(LongWritable.class, conf);
 LongWritable firstParam = new LongWritable(RANDOM.nextLong());
 client.call(firstParam, serverAddr, null, null, MIN_SLEEP_TIME, 0, conf);
 client.stop();
 // Any call through a stopped client must fail with IOException.
 LongWritable secondParam = new LongWritable(RANDOM.nextLong());
 client.call(secondParam, serverAddr, null, null, MIN_SLEEP_TIME, 0, conf);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-mapreduce-client-jobclient

/**
 * Starts the client-facing RPC server at the configured bind address,
 * using a copy of the service configuration, then starts the parent
 * service.
 *
 * @throws Exception if the server or parent service fails to start
 */
@Override
protected void serviceStart() throws Exception {
 // All the clients to appsManager are supposed to be authenticated via
 // Kerberos if security is enabled, so no secretManager.
 YarnRPC rpc = YarnRPC.create(getConfig());
 Configuration clientServerConf = new Configuration(getConfig());
 this.server = rpc.getServer(ApplicationClientProtocol.class, this,
   clientBindAddress, clientServerConf, null, 1);
 this.server.start();
 super.serviceStart();
}

代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-common

/**
 * JUnit class fixture: starts a ResourceTracker RPC server on an
 * ephemeral port (port 0) and creates a client connected to the server's
 * actual post-bind address.
 */
@BeforeClass
public static void start() {
 InetSocketAddress address = new InetSocketAddress(0);
 Configuration configuration = new Configuration();
 ResourceTracker instance = new ResourceTrackerTestImpl();
 server = RpcServerFactoryPBImpl.get().getServer(ResourceTracker.class,
   instance, address, configuration, null, 1);
 server.start();
 // Connect the client to the address the server actually bound to.
 client = (ResourceTracker) RpcClientFactoryPBImpl.get().getClient(
   ResourceTracker.class, 1, NetUtils.getConnectAddress(server),
   configuration);
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

@Override
 public Server run() throws IOException {
  // Builds and starts a SASL-secured test RPC server bound to ADDRESS
  // on an ephemeral port, running inside the privileged user's context.
  RPC.Builder builder = new RPC.Builder(serverConf);
  builder.setProtocol(TestSaslProtocol.class);
  builder.setInstance(new TestSaslImpl());
  builder.setBindAddress(ADDRESS);
  builder.setPort(0);
  builder.setNumHandlers(5);
  builder.setVerbose(true);
  builder.setSecretManager(serverSm);
  Server saslServer = builder.build();
  saslServer.start();
  return saslServer;
 }
});

代码示例来源:origin: ch.cern.hadoop/hadoop-common

@Override
 public Server run() throws IOException {
  // Builds and starts a SASL-secured test RPC server bound to ADDRESS
  // on an ephemeral port, running inside the privileged user's context.
  Server server = new RPC.Builder(serverConf)
  .setProtocol(TestSaslProtocol.class)
  .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
  .setNumHandlers(5).setVerbose(true)
  .setSecretManager(serverSm)
  .build();      
  server.start();
  return server;
 }
});

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/** Start RPC server */
MiniServer(Configuration conf, String user, String keytabFile)
throws IOException {
 // Log in via the Kerberos keytab before creating the secured server.
 UserGroupInformation.setConfiguration(conf);
 UserGroupInformation.loginUserFromKeytab(user, keytabFile);
 // Arguments are token lifetimes in milliseconds (24h / 7d / 24h / 1h).
 secretManager = 
  new TestDelegationTokenSecretManager(24*60*60*1000,
    7*24*60*60*1000,24*60*60*1000,3600000);
 secretManager.startThreads();
 rpcServer = new RPC.Builder(conf).setProtocol(MiniProtocol.class)
   .setInstance(this).setBindAddress(DEFAULT_SERVER_ADDRESS).setPort(0)
   .setNumHandlers(1).setVerbose(false).setSecretManager(secretManager)
   .build();
 rpcServer.start();
}

相关文章