This article collects code examples of the weka.core.Instances.<init>() method in Java and shows how Instances.<init>() is used in practice. The examples were extracted from selected projects hosted on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as useful references. Details of the Instances.<init>() method are as follows:
Package path: weka.core.Instances
Class name: Instances
Method name: <init>
Description (of the Reader-based overload): Reads an ARFF file from a reader, and assigns a weight of one to each instance. Lets the index of the class attribute be undefined (negative).
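Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of two overloads that recur in them: Instances(Reader), which parses ARFF data, and Instances(Instances, int), which copies only the header. The file path data/iris.arff and the class name InstancesConstructorDemo are placeholders.

import java.io.BufferedReader;
import java.io.FileReader;
import weka.core.Instances;

public class InstancesConstructorDemo {
    public static void main(String[] args) throws Exception {
        // Instances(Reader): parses the ARFF data; the class index starts out undefined (-1)
        BufferedReader reader = new BufferedReader(new FileReader("data/iris.arff"));
        Instances data = new Instances(reader);
        reader.close();
        data.setClassIndex(data.numAttributes() - 1); // pick the last attribute as the class

        // Instances(Instances, int): copies attribute information only, with initial capacity 0
        Instances header = new Instances(data, 0);
        System.out.println(header.numInstances()); // prints 0 - no instances were copied
    }
}

The plain copy constructor Instances(Instances), which many of the examples below use, copies all instances and references to the header information of the given dataset.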
Code example from: net.sf.meka/meka (the same code also appears in Waikato/meka)

/**
 * Stack two Instances together row-wise.
 */
public static final Instances combineInstances(Instances D1, Instances D2) {
    Instances D = new Instances(D1);
    for (int i = 0; i < D2.numInstances(); i++) {
        D.add(D2.instance(i));
    }
    return D;
}
Code example from: stackoverflow.com

JFrame1 form = new JFrame1();
form.setVisible(true);
form.addPropertyChangeListener(new PropertyChangeListener() {
    @Override
    public void propertyChange(PropertyChangeEvent pce) {
        // Handle the change here
        String pth = (String) pce.getNewValue();
        BufferedReader datafile = readDataFile(pth); // readDataFile is a helper defined elsewhere (not shown)
        Instances data = new Instances(datafile);
        data.setClassIndex(data.numAttributes() - 1);
        (...)
    }
});
Code example from: stackoverflow.com

public static void LoadAndTest(String filename_test, String filename_model) throws Exception {
    BufferedReader datafile_test = readDataFile(filename_test);
    Instances data_test = new Instances(datafile_test);
    data_test.setClassIndex(data_test.numAttributes() - 1);
    Classifier cls = (Classifier) weka.core.SerializationHelper.read(filename_model);
    int act = 0;
    for (int i = 0; i < data_test.numInstances(); i++) {
        double pred = cls.classifyInstance(data_test.instance(i));
        double real = data_test.instance(i).classValue();
        if (pred == real) {
            act = act + 1;
        }
    }
    double pct = (double) act / (double) data_test.numInstances();
    System.out.println("Accuracy = " + pct);
}
Code example from: net.sf.meka/meka (the same code also appears in Waikato/meka)

/**
 * Transform - transform dataset D for this node.
 * this.j defines the current node index, e.g., 3
 * this.paY[] defines parents, e.g., [1,4]
 * we should remove the rest, e.g., [0,2,5,...,L-1]
 * @return dataset with all variables of D removed EXCEPT the current node and its parents.
 */
public Instances transform(Instances D) throws Exception {
    int L = D.classIndex();
    d = D.numAttributes() - L;
    int keep[] = A.append(this.paY, j); // keep all parents and self!
    Arrays.sort(keep);
    int remv[] = A.invert(keep, L);     // i.e., remove the rest < L
    Arrays.sort(remv);
    map = new int[L];
    for (int j = 0; j < L; j++) {
        map[j] = Arrays.binarySearch(keep, j);
    }
    Instances D_ = F.remove(new Instances(D), remv, false);
    D_.setClassIndex(map[this.j]);
    return D_;
}
Code example from: net.sf.meka/meka (the same code also appears in Waikato/meka)

@Override
public Instance transformInstance(Instance x) throws Exception {
    Instances tmpInst = new Instances(x.dataset());
    tmpInst.delete();
    tmpInst.add(x);
    Instances features = this.extractPart(tmpInst, false);
    Instances pseudoLabels = new Instances(this.compressedTemplateInst);
    Instance tmpin = pseudoLabels.instance(0);
    pseudoLabels.delete();
    pseudoLabels.add(tmpin);
    for (int i = 0; i < pseudoLabels.classIndex(); i++) {
        pseudoLabels.instance(0).setMissing(i);
    }
    Instances newDataSet = Instances.mergeInstances(pseudoLabels, features);
    newDataSet.setClassIndex(pseudoLabels.numAttributes());
    return newDataSet.instance(0);
}
Code example from: sc.fiji/Trainable_Segmentation (the same code also appears in fiji/Trainable_Segmentation)

/**
 * Bag class for holding the result of the loaded classifier.
 */
private static class LoadedClassifier {
    private AbstractClassifier newClassifier = null;
    private Instances newHeader = null;
}
Code example from: net.sf.meka/meka

protected Instances convert(Instances D, int j, int k) {
    int L = D.classIndex();
    D = new Instances(D);
    D.insertAttributeAt(classAttribute, 0);
    D.setClassIndex(0);
    for (int i = 0; i < D.numInstances(); i++) {
        String c = (String) ((int) Math.round(D.instance(i).value(j + 1)) + "" + (int) Math.round(D.instance(i).value(k + 1)));
        D.instance(i).setClassValue(c);
    }
    for (int i = 0; i < L; i++)
        D.deleteAttributeAt(1);
    m_InstancesTemplate = new Instances(D, 0);
    return D;
}
Code example from: Stratio/wikipedia-parser

public GroupFeature(List<FeatureExtractor> features) {
    this.features = ImmutableList.copyOf(features);
    ImmutableList.Builder<Attribute> result = ImmutableList.builder();
    for (FeatureExtractor fe : this.features) {
        for (Attribute att : fe.attributes()) {
            result.add((Attribute) att.copy());
        }
    }
    _attributes = result.build();
    _instances = new Instances("FOO", newArrayList(_attributes), 0);
    result = ImmutableList.builder();
    for (int i = 0; i < _instances.numAttributes(); i++) {
        result.add(_instances.attribute(i));
    }
    _attributes = result.build();
}
Code example from: nz.ac.waikato.cms.weka/weka-stable

/**
 * performs a typical test
 */
public void testTypical() {
    Instances icopy = new Instances(m_Instances);
    m_Filter = getFilter();
    Instances result = useFilter();
    assertEquals(result.numAttributes(), icopy.numInstances() + 1);
}
Code example from: nz.ac.waikato.cms.weka/weka-stable

public void testPruneMinFreq() throws Exception {
    Instances data1 = getData1();
    Instances structure = new Instances(data1, 0);
    DictionaryBuilder builder = new DictionaryBuilder();
    builder.setMinTermFreq(1);
    builder.setup(structure);
    for (int i = 0; i < data1.numInstances(); i++) {
        builder.processInstance(data1.instance(i));
    }
    assertEquals(15, builder.getDictionaries(false)[0].size());
    Map<String, int[]> consolidated = builder.finalizeDictionary();
    // min freq of 1 should keep all terms
    assertEquals(15, consolidated.size());
}
Code example from: net.sf.meka/meka

public static double[][] LEAD(Instances D, Classifier h, Random r, String MDType) throws Exception {
    Instances D_r = new Instances(D);
    D_r.randomize(r);
    // 60/40 train/test split using the Instances(Instances, first, toCopy) constructor
    Instances D_train = new Instances(D_r, 0, D_r.numInstances() * 60 / 100);
    Instances D_test = new Instances(D_r, D_train.numInstances(), D_r.numInstances() - D_train.numInstances());
    BR br = new BR();
    br.setClassifier(h);
    Result result = Evaluation.evaluateModel((MultiLabelClassifier) br, D_train, D_test, "PCut1", "1");
    return LEAD(D_test, result, MDType);
}
Code example from: Waikato/wekaDeeplearning4j

@Override
public double[] distributionForInstance(Instance instance) throws Exception {
    Instances data = new Instances(instance.dataset());
    data.add(instance);
    return distributionsForInstances(data)[0];
}
Code example from: nz.ac.waikato.cms.weka/weka-stable

/**
 * performs the application with no options set
 */
public void testDefault() {
    Instances icopy = new Instances(m_Instances);
    m_Filter = getFilter();
    Instances result = useFilter();
    assertEquals(result.numAttributes(), icopy.numAttributes());
}
Code example from: sc.fiji/T2-NIT

Operator() {
    ArrayList<Attribute> a = new ArrayList<Attribute>();
    for (int i = 0; i < attrs.length - 1; i++) {
        a.add(new Attribute(attrs[i])); // numeric
    }
    ArrayList<String> d = new ArrayList<String>();
    d.add("false");
    d.add("true");
    a.add(new Attribute(attrs[attrs.length - 1], d)); // nominal attribute
    data = new Instances("Buh", a, 0);
    data.setClassIndex(attrs.length - 1); // the CLASS
}
Code example from: nz.ac.waikato.cms.weka/weka-stable

private Instances parseTransactionsMustContain(Instances data) {
    String[] split = m_transactionsMustContain.trim().split(",");
    boolean[] transactionsMustContainIndexes = new boolean[data.numAttributes()];
    int numInTransactionsMustContainList = split.length;
    for (String element : split) {
        String attName = element.trim();
        Attribute att = data.attribute(attName);
        if (att == null) {
            System.err.println("[FPGrowth] : WARNING - can't find attribute "
                + attName + " in the data.");
            numInTransactionsMustContainList--;
        } else {
            transactionsMustContainIndexes[att.index()] = true;
        }
    }
    if (numInTransactionsMustContainList == 0) {
        return data;
    } else {
        Instances newInsts = new Instances(data, 0);
        for (int i = 0; i < data.numInstances(); i++) {
            if (passesMustContain(data.instance(i), transactionsMustContainIndexes,
                    numInTransactionsMustContainList)) {
                newInsts.add(data.instance(i));
            }
        }
        newInsts.compactify();
        return newInsts;
    }
}
Code example from: Waikato/wekaDeeplearning4j

/**
 * Load the diabetes ARFF file.
 *
 * @return Diabetes data as Instances
 * @throws Exception IO error.
 */
public static Instances loadDiabetes() throws Exception {
    Instances data =
        new Instances(new FileReader("src/test/resources/numeric/diabetes_numeric.arff"));
    data.setClassIndex(data.numAttributes() - 1);
    return data;
}