本文整理了Java中water.H2O.size()
方法的一些代码示例,展示了H2O.size()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。H2O.size()
方法的具体详情如下:
包路径:water.H2O
类名称:H2O
方法名:size
暂无
代码示例来源:origin: h2oai/h2o-3
/**
 * Determine what fraction of the training rows one iteration should consume.
 * @param numRows number of training rows
 * @param train_samples_per_iteration number of training rows to be processed per iteration
 * @param replicate_training_data whether or not the training data is replicated on each node
 * @return fraction of rows to be used for training during one iteration
 */
private float computeRowUsageFraction(final long numRows, final long train_samples_per_iteration, final boolean replicate_training_data) {
  // Raw ratio of requested samples per iteration to available rows.
  float fraction = train_samples_per_iteration / (float) numRows;
  if (replicate_training_data) {
    // Every node holds a full copy of the data, so split the share across the cloud.
    fraction /= H2O.CLOUD.size();
  }
  assert fraction > 0;
  return fraction;
}
private float rowFraction(Frame train, DeepLearningParameters p, DeepLearningModel m) {
代码示例来源:origin: h2oai/h2o-3
/**
* Compute the fraction of rows that need to be used for training during one iteration
* @param numRows number of training rows
* @param train_samples_per_iteration number of training rows to be processed per iteration
* @param replicate_training_data whether or not the training data is replicated on each node
* @return fraction of rows to be used for training during one iteration
*/
private float computeRowUsageFraction(final long numRows, final long train_samples_per_iteration, final boolean replicate_training_data) {
float rowUsageFraction = (float)train_samples_per_iteration / numRows;
// Replicated data: each node sees all rows, so divide the share by the cloud size.
if (replicate_training_data) rowUsageFraction /= H2O.CLOUD.size();
assert(rowUsageFraction > 0);
return rowUsageFraction;
}
private float rowFraction(Frame train, DeepWaterParameters p, DeepWaterModel m) {
代码示例来源:origin: h2oai/h2o-2
@Override
public void reduce(ChunkSummary mrt) {
  // Reducing a summary into itself would double-count everything; skip that case.
  if (mrt.chunk_counts == chunk_counts) return;
  // Fold in the per-chunk-type counts and byte sizes.
  for (int type = 0; type < chunkTypes.length; type++) {
    chunk_counts[type] += mrt.chunk_counts[type];
    chunk_byte_sizes[type] += mrt.chunk_byte_sizes[type];
  }
  // Fold in the per-node byte totals for every member of the cloud.
  for (int node = 0; node < H2O.CLOUD.size(); node++) {
    byte_size_per_node[node] += mrt.byte_size_per_node[node];
  }
}
代码示例来源:origin: h2oai/h2o-2
/**
 * Work out the per-iteration training-row fraction.
 * @param numRows number of training rows
 * @param train_samples_per_iteration number of training rows to be processed per iteration
 * @param replicate_training_data whether or not the training data is replicated on each node
 * @return fraction of rows to be used for training during one iteration
 */
private static float computeRowUsageFraction(final long numRows, final long train_samples_per_iteration, final boolean replicate_training_data) {
  float usage = (float) train_samples_per_iteration / numRows;
  // With replicated data each of the N nodes holds a full copy, so divide the share by N.
  usage = replicate_training_data ? usage / H2O.CLOUD.size() : usage;
  assert usage > 0;
  return usage;
}
private static float rowFraction(Frame train, DeepLearning p, DeepLearningModel m) {
代码示例来源:origin: h2oai/h2o-2
/**
 * Block until the cloud reaches at least {@code x} members (with Paxos common
 * knowledge established) or {@code ms} milliseconds elapse.
 * @param x  minimum required cloud size
 * @param ms maximum time to wait, in milliseconds
 * @throws RuntimeException if the cloud is still smaller than {@code x} after the wait
 */
public static void waitForCloudSize(int x, long ms) {
  long start = System.currentTimeMillis();
  while( System.currentTimeMillis() - start < ms ) {
    if( CLOUD.size() >= x && Paxos._commonKnowledge )
      break;
    try {
      Thread.sleep(100);
    } catch( InterruptedException ie ) {
      // Fix: the original swallowed the interrupt. Restore the interrupt status
      // so callers can observe it, and stop waiting instead of busy-spinning.
      Thread.currentThread().interrupt();
      break;
    }
  }
  if( H2O.CLOUD.size() < x )
    throw new RuntimeException("Cloud size under " + x);
}
代码示例来源:origin: h2oai/h2o-3
@Override
protected int desiredChunks(final Frame original_fr, boolean local) {
  // Reproducible mode forces a single chunk so results do not depend on partitioning.
  if (_parms._reproducible) return 1;
  // Otherwise target 4 chunks per core — cloud-wide unless restricted to this node —
  // but never more chunks than there are rows.
  final int target = 4 * H2O.NUMCPUS * (local ? 1 : H2O.CLOUD.size());
  return (int) Math.min(target, original_fr.numRows());
}
代码示例来源:origin: h2oai/h2o-2
/** Round-robin task {@code i} onto a cloud node chosen by index modulo the cloud size. */
private void forkDTask(int i){
  final int nodeId = i % H2O.CLOUD.size();
  forkDTask(i, H2O.CLOUD._memary[nodeId]);
}
private void forkDTask(final int i, H2ONode n){
代码示例来源:origin: h2oai/h2o-2
// Install the next Cloud membership view. The synchronized block ensures only one
// thread at a time advances the 1-byte cloud index and publishes the new H2O object;
// the heartbeat's cloud-size field is updated after the lock is released
// (NOTE(review): preserving the original ordering — confirm readers tolerate the
// brief window where CLOUD is new but the heartbeat still shows the old size).
void set_next_Cloud( H2ONode[] h2os, int hash ) {
synchronized(this) {
int idx = _idx+1; // Unique 1-byte Cloud index
if( idx == 256 ) idx=1; // wrap, avoiding zero
CLOUDS[idx] = CLOUD = new H2O(h2os,hash,idx);
}
// Cloud size is carried in the heartbeat as a 16-bit char.
SELF._heartbeat._cloud_size=(char)CLOUD.size();
}
代码示例来源:origin: h2oai/h2o-2
@Override public void lcompute() {
  // One row per file, one column per cloud node; a 1 marks "present with expected size"
  // on this node.
  _ok = new int[_files.length][H2O.CLOUD.size()];
  final int self = H2O.SELF.index();
  for (int i = 0; i < _files.length; ++i) {
    final File f = new File(_files[i]);
    if (f.exists() && f.length() == _sizes[i]) {
      _ok[i][self] = 1;
    }
  }
  tryComplete();
}
代码示例来源:origin: h2oai/h2o-2
@Override
public void compute2() {
  // Build one UDP drop test per ordered pair of distinct cloud nodes, then run them all.
  int next = 0;
  final int nodes = H2O.CLOUD.size();
  for (int src = 0; src < nodes; ++src) {
    for (int dst = 0; dst < nodes; ++dst) {
      if (src == dst) continue; // no self-to-self test
      dropTests[next++] = new UDPDropTester(H2O.CLOUD._memary[src], H2O.CLOUD._memary[dst], msg_sizes, 10);
    }
  }
  ForkJoinTask.invokeAll(dropTests);
  tryComplete();
}
}).join();
代码示例来源:origin: h2oai/h2o-2
@Override public boolean toHTML( StringBuilder sb ) {
  // Prefix the path with the writing node's address for a node-local export in a
  // multi-node cloud, so the reader knows which machine holds the file.
  final String nodePrefix = (_local && H2O.CLOUD.size() > 1) ? H2O.SELF_ADDRESS + ":" : "";
  DocGen.HTML.section(sb, "Export done. Key '" + src_key.toString() +
      "' was written to " + nodePrefix + path.toString());
  return true;
}
代码示例来源:origin: h2oai/h2o-2
@Override public void lcompute() {
  // One result slot per cloud node; this node fills its own slot with a dump of
  // every live thread's stack.
  _result = new String[H2O.CLOUD.size()];
  final StringBuilder sb = new StringBuilder();
  for (Map.Entry<Thread, StackTraceElement[]> entry : Thread.getAllStackTraces().entrySet()) {
    append(sb, entry.getKey());
    append(sb, entry.getValue());
    sb.append('\n');
  }
  _result[H2O.SELF.index()] = sb.toString();
  tryComplete();
}
代码示例来源:origin: h2oai/h2o-2
@Override
public void run() {
  // Delay the report so the cloud has time to finish forming.
  try {
    Thread.sleep(sleepMillis);
  } catch (InterruptedException ie) {
    // Fix: the original `catch (Exception ignore) {}` silently swallowed the
    // interrupt (and any other exception). Catch only what sleep() throws and
    // restore the interrupt status for callers.
    Thread.currentThread().interrupt();
  }
  // Only the cloud leader (node 0) reports, so each event is counted once per cloud.
  if (H2O.SELF == H2O.CLOUD._memary[0]) {
    if (OPT_ARGS.ga_hadoop_ver != null)
      H2O.GA.postAsync(new EventHit("System startup info", "Hadoop version", OPT_ARGS.ga_hadoop_ver, 1));
    H2O.GA.postAsync(new EventHit("System startup info", "Cloud", "Cloud size", CLOUD.size()));
  }
}
}
代码示例来源:origin: h2oai/h2o-2
/** Return the cloud-wide type id for {@code className}, installing a new id on first use. */
static public int onIce(String className) {
  final Integer cached = MAP.get(className);
  if( cached != null ) return cached;
  // First sighting of this class: a cloud must exist before a cluster-wide id can be assigned.
  assert H2O.CLOUD.size() > 0 : "No cloud when getting type id for "+className;
  // Non-leader nodes ask the leader for the id; on the leader the -1 sentinel is kept.
  final int id = (H2O.CLOUD.leader() != H2O.SELF) ? FetchId.fetchId(className) : -1;
  return install(className,id);
}
代码示例来源:origin: h2oai/h2o-2
@Override public Response serve() {
  // Collect stack traces from every node, then build one summary per node.
  final String[] traces = new JStackCollectorTask().invokeOnAllNodes()._result;
  final int n = H2O.CLOUD.size();
  nodes = new StackSummary[n];
  for (int i = 0; i < n; i++) {
    nodes[i] = new StackSummary(H2O.CLOUD._memary[i].toString(), traces[i]);
  }
  node_name = H2O.SELF.toString();
  cloud_name = H2O.NAME;
  time = DateFormat.getInstance().format(new Date());
  // Also mirror every node's traces into the debug log.
  for (int i = 0; i < nodes.length; i++) {
    Log.debug(Log.Tag.Sys.WATER, nodes[i].name, nodes[i].traces);
  }
  return Response.done(this);
}
代码示例来源:origin: h2oai/h2o-2
/** Run this task once on every node in the cloud, then clean up the marker keys. */
public T invokeOnAllNodes() {
  final H2O cloud = H2O.CLOUD;
  // One home-pinned key per node forces one task instance onto each cloud member.
  final String skey = "RunOnAll"+Key.rand();
  final Key[] args = new Key[cloud.size()];
  for (int i = 0; i < args.length; ++i) {
    args[i] = Key.make(skey, (byte)0, Key.DFJ_INTERNAL_USER, cloud._memary[i]);
  }
  invoke(args);
  // The keys were only routing markers; remove them once the task has run.
  for (Key arg : args) {
    DKV.remove(arg);
  }
  return self();
}
代码示例来源:origin: h2oai/h2o-2
@Override
public void compute2() {
// Result frame: one all-zero output vector per column of _y.
_z = new Frame(_x.anyVec().makeZeros(_y.numCols()));
int total_cores = H2O.CLOUD.size()*H2O.NUMCPUS;
int chunksPerCol = _y.anyVec().nChunks();
// Cap on concurrently in-flight per-column tasks: grows with total cores,
// shrinks as each column spans more chunks. NOTE(review): 256 looks like a
// tuning constant — confirm before changing.
int maxP = 256*total_cores/chunksPerCol;
Log.info("maxP = " + maxP);
// Counter seeded at maxP-1; presumably used by forkVecTask to throttle how
// many column tasks run at once — TODO confirm against forkVecTask.
_cntr = new AtomicInteger(maxP-1);
// Register two pending completions per column, minus the one this call owns.
addToPendingCount(2*_y.numCols()-1);
// Launch up to maxP column tasks now; the rest are presumably chained in as
// earlier ones complete.
for(int i = 0; i < Math.min(_y.numCols(),maxP); ++i)
forkVecTask(i);
}
代码示例来源:origin: h2oai/h2o-2
@Override protected Boolean defaultValue() {
  // Default to local-only execution when the whole dataset fits in this node's
  // memory AND a distributed run could not give every node at least 2 chunks.
  final long localBytes = fr().byteSize();
  if( !MemoryManager.tryReserveTaskMem(localBytes) ) return false;
  if( fr().anyVec().nChunks() >= 2*H2O.CLOUD.size() ) return false;
  return true;
}
}
代码示例来源:origin: h2oai/h2o-2
/**
 * Launch this task across the whole cloud over the given frame.
 * The statement order below is significant: key reservation and field setup
 * must complete before setupLocal0()/compute2() start the distributed work.
 * @param outputs number of output vectors to reserve keys for (0 for none)
 * @param fr frame of input vectors to work on
 * @param run_local run locally by copying data, or run globally
 */
public final void exec( int outputs, Frame fr, boolean run_local){
// Use first readable vector to gate home/not-home
fr.checkCompatible(); // Check for compatible vectors
// Reserve key space for the requested output vectors before any work begins.
if((_noutputs = outputs) > 0) _vid = fr.anyVec().group().reserveKeys(outputs);
_fr = fr; // Record vectors to work on
_nxx = (short)H2O.SELF.index(); _nhi = (short)H2O.CLOUD.size(); // Do Whole Cloud
_run_local = run_local; // Run locally by copying data, or run globally?
setupLocal0(); // Local setup
compute2();
}
代码示例来源:origin: h2oai/h2o-2
@Test public void testJStack() {
  // Repeated serves should consistently report one stack summary per cloud node.
  for (int iter = 0; iter < 10; iter++) {
    final JStack js = new JStack();
    js.serve();
    Assert.assertEquals(js.nodes.length, H2O.CLOUD.size());
  }
}
}
内容来源于网络,如有侵权,请联系作者删除!