Usage of the org.apache.accumulo.core.client.Instance class, with code examples


This article collects Java code examples of the org.apache.accumulo.core.client.Instance class and shows how the class is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should make a useful reference. Details of the Instance class:
Package: org.apache.accumulo.core.client
Class name: Instance

About Instance

This class represents the information a client needs to know to connect to an instance of Accumulo.
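
Before reading the project excerpts below, it helps to see the typical lifecycle end to end: construct an Instance (usually a ZooKeeperInstance) that describes the cluster, then ask it for an authenticated Connector. The following minimal sketch assumes a reachable 1.x cluster; the instance name, ZooKeeper hosts, and credentials are placeholders rather than values taken from any of the quoted projects.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class InstanceConnectExample {
 public static void main(String[] args) throws Exception {
  // Describe the cluster: instance name plus ZooKeeper quorum.
  // Both values are placeholders for your own deployment.
  Instance instance = new ZooKeeperInstance("myInstance", "zk1:2181,zk2:2181");

  // The Instance itself holds no credentials; getConnector performs
  // authentication and returns the handle used for table, security,
  // and scan operations.
  Connector connector = instance.getConnector("root", new PasswordToken("secret"));

  System.out.println("Connected to " + instance.getInstanceName()
    + " via " + instance.getZooKeepers());
 }
}

Note that Instance and Connector were deprecated in Accumulo 2.0 in favor of AccumuloClient, so the pattern above (and all of the examples below) belongs to the 1.x client API.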

Code examples

Code example source: prestodb/presto

Connector conn = inst.getConnector("root", new PasswordToken(""));
conn.tableOperations().create(table.getFullTableName());
conn.tableOperations().create(table.getIndexTableName());
conn.tableOperations().create(table.getMetricsTableName());
Scanner scan = conn.createScanner(table.getIndexTableName(), new Authorizations());
scan.setRange(new Range());
Iterator<Entry<Key, Value>> iter = scan.iterator();
assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", "");
assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", "");
assertFalse(iter.hasNext());
scan.close();
scan = conn.createScanner(table.getMetricsTableName(), new Authorizations());

Code example source: prestodb/presto

public static synchronized DistributedQueryRunner createAccumuloQueryRunner(Map<String, String> extraProperties)
    throws Exception
{
  DistributedQueryRunner queryRunner =
      new DistributedQueryRunner(createSession(), 4, extraProperties);
  queryRunner.installPlugin(new TpchPlugin());
  queryRunner.createCatalog("tpch", "tpch");
  queryRunner.installPlugin(new AccumuloPlugin());
  Map<String, String> accumuloProperties =
      ImmutableMap.<String, String>builder()
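          // Connector.getInstance() returns the Instance the connector was
          // built from, a handy way to propagate the connection settings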
          .put(AccumuloConfig.INSTANCE, connector.getInstance().getInstanceName())
          .put(AccumuloConfig.ZOOKEEPERS, connector.getInstance().getZooKeepers())
          .put(AccumuloConfig.USERNAME, MAC_USER)
          .put(AccumuloConfig.PASSWORD, MAC_PASSWORD)
          .put(AccumuloConfig.ZOOKEEPER_METADATA_ROOT, "/presto-accumulo-test")
          .build();
  queryRunner.createCatalog("accumulo", "accumulo", accumuloProperties);
  if (!tpchLoaded) {
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), TpchTable.getTables());
    connector.tableOperations().addSplits("tpch.orders", ImmutableSortedSet.of(new Text(new LexicoderRowSerializer().encode(BIGINT, 7500L))));
    tpchLoaded = true;
  }
  return queryRunner;
}

Code example source: org.apache.rya/rya.mapreduce

Connector connector = instance.getConnector(user, password);
TableOperations tos = connector.tableOperations();
String tableId = tos.tableIdMap().get(table);
Scanner scanner = connector.createScanner("accumulo.metadata", Authorizations.EMPTY); //TODO: auths?
scanner.setRange(new Range(new Text(tableId + "\u0000"), new Text(tableId + "\uFFFD")));
scanner.fetchColumnFamily(new Text("file"));
List<String> files = new ArrayList<String>();
List<InputSplit> fileSplits = new ArrayList<InputSplit>();

Code example source: org.apache.accumulo/accumulo-test

@Test
public void test() throws Exception {
 Scanner scanner = conn.createScanner(tableName, new Authorizations());
 scanner.fetchColumn(new Text("dir"), new Text("counts"));
 assertFalse(scanner.iterator().hasNext());
 opts.instance = conn.getInstance().getInstanceName();
 opts.zookeepers = conn.getInstance().getZooKeepers();
 opts.setTableName(tableName);
 opts.setPrincipal(conn.whoami());
 expected.add(new Pair<>(QueryUtil.getRow("").toString(), "1,0,3,3"));
 expected.add(new Pair<>(QueryUtil.getRow("/local").toString(), "2,1,2,3"));
 expected.add(new Pair<>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));

Code example source: org.apache.hive/hive-accumulo-handler

protected AccumuloRecordWriter(JobConf job)
  throws AccumuloException, AccumuloSecurityException, IOException {
 this.isStringEncoded = AccumuloIndexedOutputFormat.getStringEncoding(job).booleanValue();
 this.simulate = AccumuloIndexedOutputFormat.getSimulationMode(job).booleanValue();
 this.createTables = AccumuloIndexedOutputFormat.canCreateTables(job).booleanValue();
 if (this.simulate) {
  LOG.info("Simulating output only. No writes to tables will occur");
 }
 this.bws = new HashMap<>();
 String tname = AccumuloIndexedOutputFormat.getDefaultTableName(job);
 this.defaultTableName = tname == null ? null : new Text(tname);
 String iname = AccumuloIndexedOutputFormat.getIndexTableName(job);
 if (iname != null) {
  LOG.info("Index Table = {}", iname);
  this.indexTableName = new Text(iname);
  this.indexDef = createIndexDefinition(job, tname, iname);
 }
 if (!this.simulate) {
  this.conn = AccumuloIndexedOutputFormat.getInstance(job)
    .getConnector(AccumuloIndexedOutputFormat.getPrincipal(job),
           AccumuloIndexedOutputFormat.getAuthenticationToken(job));
  this.mtbw = this.conn.createMultiTableBatchWriter(
    AccumuloIndexedOutputFormat.getBatchWriterOptions(job));
 }
}

Code example source: JHUAPL/AccumuloGraph

@Override
public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
 super.initialize(inSplit, attempt);
 rowIterator = new RowIterator(scannerIterator);
 currentK = new Text();
 try {
  conf = new AccumuloGraphConfiguration();
  conf.setZooKeeperHosts(EdgeInputFormat.getInstance(attempt).getZooKeepers());
  conf.setInstanceName(EdgeInputFormat.getInstance(attempt).getInstanceName());
  conf.setUser(EdgeInputFormat.getPrincipal(attempt));
  conf.setTokenWithFallback(EdgeInputFormat.getToken(attempt));
  conf.setGraphName(attempt.getConfiguration().get(GRAPH_NAME));
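   // MockInstance is the in-memory Instance implementation used for tests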
  if (EdgeInputFormat.getInstance(attempt) instanceof MockInstance) {
   conf.setInstanceType(InstanceType.Mock);
  }
  parent = AccumuloGraph.open(conf.getConfiguration());
 } catch (AccumuloException e) {
  throw new AccumuloGraphException(e);
 }
}

Code example source: org.apache.accumulo/accumulo-test

static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException,
   AccumuloSecurityException, TableExistsException, TableNotFoundException,
   MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
  c.tableOperations().create(tablename);
  BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
  for (int i = 0; i < 10; i++) {
   Mutation m = new Mutation("" + i);
   m.put(input_cf, input_cq, "row" + i);
   bw.addMutation(m);
  }
  bw.close();
  Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i",
    c.getInstance().getInstanceName(), "-z", c.getInstance().getZooKeepers(), "-u", "root",
    "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
  assertEquals(0, hash.waitFor());

  Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
  s.fetchColumn(new Text(input_cf), new Text(output_cq));
  int i = 0;
  for (Entry<Key,Value> entry : s) {
   MessageDigest md = MessageDigest.getInstance("MD5");
   byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
   assertEquals(entry.getValue().toString(), new String(check));
   i++;
  }

 }
}

Code example source: org.apache.accumulo/accumulo-test

private long ingest() throws Exception {
 final Connector c = getConnector();
 final String tableName = getUniqueNames(1)[0];
 log.info("Creating the table");
 c.tableOperations().create(tableName);
 log.info("Splitting the table");
 final long SPLIT_COUNT = 100;
 final long distance = Long.MAX_VALUE / SPLIT_COUNT;
 final SortedSet<Text> splits = new TreeSet<>();
 for (int i = 1; i < SPLIT_COUNT; i++) {
  splits.add(new Text(String.format("%016x", i * distance)));
 }
 c.tableOperations().addSplits(tableName, splits);
 log.info("Waiting for balance");
 c.instanceOperations().waitForBalance();
 final Instance inst = c.getInstance();
 log.info("Starting ingest");
 final long start = System.currentTimeMillis();
 final String args[] = {"-i", inst.getInstanceName(), "-z", inst.getZooKeepers(), "-u", "root",
   "-p", ROOT_PASSWORD, "--batchThreads", "2", "--table", tableName, "--num",
   Long.toString(50 * 1000), // 50K 100 byte entries
 };
 ContinuousIngest.main(args);
 final long result = System.currentTimeMillis() - start;
 log.debug(String.format("Finished in %,d ms", result));
 log.debug("Dropping table");
 c.tableOperations().delete(tableName);
 return result;
}

Code example source: NationalSecurityAgency/timely

public static void main(String[] args) throws Exception {

    try (ConfigurableApplicationContext ctx = new SpringApplicationBuilder(SpringBootstrap.class)
        .bannerMode(Mode.OFF).web(false).run(args)) {
      Configuration conf = ctx.getBean(Configuration.class);

      final BaseConfiguration apacheConf = new BaseConfiguration();
      Accumulo accumuloConf = conf.getAccumulo();
      apacheConf.setProperty("instance.name", accumuloConf.getInstanceName());
      apacheConf.setProperty("instance.zookeeper.host", accumuloConf.getZookeepers());
      final ClientConfiguration aconf = new ClientConfiguration(Collections.singletonList(apacheConf));
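      // ZooKeeperInstance can also be constructed from a ClientConfiguration
      // instead of explicit instance-name and ZooKeeper-host arguments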
      final Instance instance = new ZooKeeperInstance(aconf);
      Connector con = instance.getConnector(accumuloConf.getUsername(),
          new PasswordToken(accumuloConf.getPassword()));
      Scanner s = con.createScanner(conf.getMetaTable(),
          con.securityOperations().getUserAuthorizations(con.whoami()));
      try {
        s.setRange(new Range(Meta.METRIC_PREFIX, true, Meta.TAG_PREFIX, false));
        for (Entry<Key, Value> e : s) {
          System.out.println(e.getKey().getRow().toString().substring(Meta.METRIC_PREFIX.length()));
        }
      } finally {
        s.close();
      }
    }
  }
}

Code example source: org.apache.accumulo/accumulo-server

private Iterator<Entry<Key,Value>> getIter2() {
 if (iter2 == null) {
  try {
   if ((table == null || !table.equals(Constants.METADATA_TABLE_ID)) && iter1Count == 0) {
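     // HdfsZooInstance.getInstance() is the server-side Instance singleton,
     // so no instance name or ZooKeeper string has to be supplied here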
    Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
    Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
    scanner.setTimeout(3, TimeUnit.SECONDS);
    if (table == null) {
     scanner.setRange(new Range(new Text("~err_"), false, new Text("~err`"), false));
    } else {
     scanner.setRange(new Range(new Text("~err_" + table)));
    }
    iter2 = scanner.iterator();
   } else {
    Map<Key,Value> m = Collections.emptyMap();
    iter2 = m.entrySet().iterator();
   }
  } catch (Exception e) {
   throw new RuntimeException(e);
  }
 }
 return iter2;
}

Code example source: org.apache.accumulo/accumulo-server

public void deleteProblemReports(String table) throws Exception {
 if (Constants.METADATA_TABLE_ID.equals(table)) {
  Iterator<ProblemReport> pri = iterator(table);
  while (pri.hasNext()) {
   pri.next().removeFromZooKeeper();
  }
  return;
 }
 Connector connector = HdfsZooInstance.getInstance().getConnector(SecurityConstants.getSystemPrincipal(), SecurityConstants.getSystemToken());
 Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
 scanner.addScanIterator(new IteratorSetting(1, "keys-only", SortedKeyIterator.class));
 if (table == null) {
  scanner.setRange(new Range(new Text("~err_"), false, new Text("~err`"), false));
 } else {
  scanner.setRange(new Range(new Text("~err_" + table)));
 }
 Mutation delMut = new Mutation(new Text("~err_" + table));
 boolean hasProblems = false;
 for (Entry<Key,Value> entry : scanner) {
  hasProblems = true;
  delMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
 }
 if (hasProblems)
  MetadataTable.getMetadataTable(SecurityConstants.getSystemCredentials()).update(delMut);
}

Code example source: apache/sqoop

Instance inst = new ZooKeeperInstance(accumuloCluster.getInstanceName(),
 accumuloCluster.getZooKeepers());
Connector conn = inst.getConnector(ACCUMULO_USER,
 new PasswordToken(ACCUMULO_PASSWORD));
Scanner scanner = conn.createScanner(tableName, Constants.NO_AUTHS);
scanner.setRange(new Range(rowKey));
Iterator<Entry<Key, Value>> iter = scanner.iterator();
while (iter.hasNext()) {
 Entry<Key, Value> entry = iter.next();
 String columnFamily = entry.getKey().getColumnFamily().toString();
 String qual = entry.getKey().getColumnQualifier().toString();
 if (columnFamily.equals(colFamily)
   && qual.equals(colName)) {
  // excerpt truncated at this point in the source
 }
}

Code example source: org.apache.accumulo/accumulo-server

public void clearMergeState(Text tableId) throws IOException, KeeperException, InterruptedException {
 synchronized (mergeLock) {
  String path = ZooUtil.getRoot(instance.getInstanceID()) + Constants.ZTABLES + "/" + tableId.toString() + "/merge";
  ZooReaderWriter.getInstance().recursiveDelete(path, NodeMissingPolicy.SKIP);
  mergeLock.notifyAll();
 }
 nextEvent.event("Merge state of %s cleared", tableId);
}

Code example source: NationalSecurityAgency/datawave

try {
  // the opening of this try/if was truncated in the source excerpt; the
  // branch condition is inferred from the surviving else branch
  if (connectionFactory == null) {
    this.defaultTableName = (tname == null) ? null : new Text(tname);
    this.conn = getInstance(conf).getConnector(getUsername(conf),
        new PasswordToken(getPassword(conf)));
  } else {
    this.connFactory = connectionFactory;
    this.conn = connectionFactory.getConnection(Priority.ADMIN, trackingMap);
  }
  mtbw = conn.createMultiTableBatchWriter(getMaxMutationBufferSize(conf),
      getMaxLatency(conf), getMaxWriteThreads(conf));
} catch (Exception e) {
  log.error(e.getMessage(), e);
}

Code example source: org.apache.accumulo/accumulo-core

public DelegationTokenImpl(Instance instance, UserGroupInformation user,
  AuthenticationTokenIdentifier identifier) {
 requireNonNull(instance);
 requireNonNull(user);
 requireNonNull(identifier);
 Credentials creds = user.getCredentials();
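 // delegation tokens are keyed by service name plus instance ID, so one
 // UGI can hold tokens for several Accumulo instances at once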
 Token<? extends TokenIdentifier> token = creds
   .getToken(new Text(SERVICE_NAME + "-" + instance.getInstanceID()));
 if (null == token) {
  throw new IllegalArgumentException(
    "Did not find Accumulo delegation token in provided UserGroupInformation");
 }
 setPasswordFromToken(token, identifier);
}

Code example source: org.apache.accumulo/accumulo-core

public OfflineIterator(ScannerOptions options, Instance instance, Credentials credentials,
  Authorizations authorizations, Text table, Range range) {
 this.options = new ScannerOptions(options);
 this.instance = instance;
 this.range = range;
 if (this.options.fetchedColumns.size() > 0) {
  this.range = range.bound(this.options.fetchedColumns.first(),
    this.options.fetchedColumns.last());
 }
 this.tableId = table.toString();
 this.authorizations = authorizations;
 this.readers = new ArrayList<>();
 try {
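   // the Instance supplies both the authenticated Connector and, via
   // instanceOperations(), the site configuration needed to read tablets offline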
  conn = instance.getConnector(credentials.getPrincipal(), credentials.getToken());
  config = new ConfigurationCopy(conn.instanceOperations().getSiteConfiguration());
  nextTablet();
  while (iter != null && !iter.hasTop())
   nextTablet();
 } catch (Exception e) {
  if (e instanceof RuntimeException)
   throw (RuntimeException) e;
  throw new RuntimeException(e);
 }
}

Code example source: org.apache.accumulo/accumulo-monitor

private Scanner getScanner(String table, String principal, AuthenticationToken at,
   StringBuilder sb) throws AccumuloException, AccumuloSecurityException {
  try {
   Connector conn = HdfsZooInstance.getInstance().getConnector(principal, at);
   if (!conn.tableOperations().exists(table)) {
    return new NullScanner();
   }
   Scanner scanner = conn.createScanner(table,
     conn.securityOperations().getUserAuthorizations(principal));
   return scanner;
  } catch (AccumuloSecurityException ex) {
   sb.append("<h2>Unable to read trace table: check trace username "
     + "and password configuration.</h2>\n");
   return null;
  } catch (TableNotFoundException ex) {
   return new NullScanner();
  }
 }
}

Code example source: prestodb/presto

@Override
  public Connector get()
  {
    try {
      Instance inst = new ZooKeeperInstance(instance, zooKeepers);
      Connector connector = inst.getConnector(username, new PasswordToken(password.getBytes(UTF_8)));
      LOG.info("Connection to instance %s at %s established, user %s", instance, zooKeepers, username);
      return connector;
    }
    catch (AccumuloException | AccumuloSecurityException e) {
      throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get connector to Accumulo", e);
    }
  }
}

Code example source: org.apache.accumulo/accumulo-test

@Override
 public void run() {
  try {
   KeyExtent extent = new KeyExtent(row, (Text) null);
   Connector connector = HdfsZooInstance.getInstance().getConnector(principal, token);
   Scanner mdScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
   Text row = extent.getMetadataEntry();
   mdScanner.setRange(new Range(row));
   for (Entry<Key,Value> entry : mdScanner) {
    if (!entry.getKey().getRow().equals(row))
     break;
   }
  } catch (TableNotFoundException e) {
   log.error("Table '" + MetadataTable.NAME + "' not found.", e);
   throw new RuntimeException(e);
  } catch (AccumuloException e) {
   log.error("AccumuloException encountered.", e);
   throw new RuntimeException(e);
  } catch (AccumuloSecurityException e) {
   log.error("AccumuloSecurityException encountered.", e);
   throw new RuntimeException(e);
  }
 }
}

Code example source: org.apache.accumulo/accumulo-test

@Test
public void tserverReplicationServicePortsAreAdvertised() throws Exception {
 // Wait for the cluster to be up
 Connector conn = getConnector();
 Instance inst = conn.getInstance();
 // Wait for a tserver to come up to fulfill this request
 conn.tableOperations().create("foo");
 Scanner s = conn.createScanner("foo", Authorizations.EMPTY);
 Assert.assertEquals(0, Iterables.size(s));
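 // the Instance also exposes the ZooKeeper quorum and session timeout,
 // letting the test read cluster state straight from ZooKeeper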
 ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
 Set<String> tserverHost = new HashSet<>();
 tserverHost.addAll(zreader.getChildren(ZooUtil.getRoot(inst) + Constants.ZTSERVERS));
 Set<HostAndPort> replicationServices = new HashSet<>();
 for (String tserver : tserverHost) {
  try {
   byte[] portData = zreader.getData(
     ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
   HostAndPort replAddress = HostAndPort.fromString(new String(portData, UTF_8));
   replicationServices.add(replAddress);
  } catch (Exception e) {
   log.error("Could not find port for {}", tserver, e);
   Assert.fail("Did not find replication port advertisement for " + tserver);
  }
 }
 // Each tserver should also have equal replication services running internally
 Assert.assertEquals("Expected an equal number of replication servicers and tservers",
   tserverHost.size(), replicationServices.size());
}
