Unable to consume Kafka messages with Apache Storm

Date: 2018-06-17 07:54:41

Tags: java apache-kafka apache-storm

I developed an application that consumes Kafka messages using Apache Storm. When I run the topology from Eclipse using LocalCluster, it works fine and messages are consumed as expected, but when I run it with the storm command (bin\storm jar ..\kafka-storm-0.0.1-SNAPSHOT.jar com.kafka_storm.util.Topology storm-kafka-topology), the topology starts but does not consume any messages. Am I doing something wrong, or can you guide me on how to track down the problem?

Topology Code

public class Topology {

public Properties configs;
public BoltBuilder boltBuilder;
public SpoutBuilder spoutBuilder;   

public Topology(String configFile) throws Exception {
    configs = new Properties();

    InputStream is = null;
    try {
        is = this.getClass().getResourceAsStream("/application.properties");
        configs.load(is);
        //configs.load(Topology.class.getResourceAsStream("/application.properties"));
        boltBuilder = new BoltBuilder(configs);
        spoutBuilder = new SpoutBuilder(configs);
    } catch (Exception ex) {
        ex.printStackTrace();
        System.exit(0);
    }
}

private void submitTopology() throws Exception {
    System.out.println("Entered in submitTopology");
    TopologyBuilder builder = new TopologyBuilder();    
    KafkaSpout<?, ?> kafkaSpout = spoutBuilder.buildKafkaSpout();
    SinkTypeBolt sinkTypeBolt = boltBuilder.buildSinkTypeBolt();
    MongoDBBolt mongoBolt = boltBuilder.buildMongoDBBolt();


    //set the kafkaSpout to topology
    //parallelism-hint for kafkaSpout - defines the number of executors/threads to be spawned for this spout
    int kafkaSpoutCount = Integer.parseInt(configs.getProperty(Keys.KAFKA_SPOUT_COUNT));
    builder.setSpout(configs.getProperty(Keys.KAFKA_SPOUT_ID), kafkaSpout, kafkaSpoutCount);


    //set the sinktype bolt
    int sinkBoltCount = Integer.parseInt(configs.getProperty(Keys.SINK_BOLT_COUNT));
    builder.setBolt(configs.getProperty(Keys.SINK_TYPE_BOLT_ID),sinkTypeBolt,sinkBoltCount).shuffleGrouping(configs.getProperty(Keys.KAFKA_SPOUT_ID));

    //set the mongodb bolt
    int mongoBoltCount = Integer.parseInt(configs.getProperty(Keys.MONGO_BOLT_COUNT));
    builder.setBolt(configs.getProperty(Keys.MONGO_BOLT_ID),mongoBolt,mongoBoltCount).shuffleGrouping(configs.getProperty(Keys.SINK_TYPE_BOLT_ID),Keys.MONGODB_STREAM);


    String topologyName = configs.getProperty(Keys.TOPOLOGY_NAME);

    Config conf = new Config();
    //Defines how many worker processes have to be created for the topology in the cluster.
    conf.setNumWorkers(1);

    System.out.println("Submitting Topology");
    //StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    System.out.println("Topology submitted");

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(topologyName, conf, builder.createTopology());
}

public static void main(String[] args) throws Exception {
    String configFile;
    if (args.length == 0) {
        System.out.println("Missing input : config file location, using default");
        configFile = "application.properties";
    } else{
        configFile = args[0];
    }

    Topology ingestionTopology = new Topology(configFile);
    ingestionTopology.submitTopology();
}

}

Spout Code

public class SpoutBuilder {

public Properties configs = null;

public SpoutBuilder(Properties configs) {
    this.configs = configs;
}
public KafkaSpout<?, ?> buildKafkaSpout() {
    String servers = configs.getProperty(Keys.KAFKA_BROKER);
    String topic = configs.getProperty(Keys.KAFKA_TOPIC);
    String group = configs.getProperty(Keys.KAFKA_CONSUMERGROUP);

    return new KafkaSpout<>(getKafkaSpoutConfig(servers,topic,group));
}

protected KafkaSpoutConfig<String, String> getKafkaSpoutConfig(String bootstrapServers, String topic, String group) {
    return KafkaSpoutConfig.builder(bootstrapServers, new String[]{topic})
        .setProp(ConsumerConfig.GROUP_ID_CONFIG, group)
        .setRetry(getRetryService())
        .setOffsetCommitPeriodMs(10_000)
        .setFirstPollOffsetStrategy(FirstPollOffsetStrategy.UNCOMMITTED_LATEST)
        .setMaxUncommittedOffsets(250)
        .setProcessingGuarantee(ProcessingGuarantee.AT_LEAST_ONCE)
        .setTupleTrackingEnforced(true)
        .setEmitNullTuples(false)
        .setRecordTranslator(new DefaultRecordTranslator<String, String>())
        .build();
}

protected KafkaSpoutRetryService getRetryService() {
    return new KafkaSpoutRetryExponentialBackoff(TimeInterval.microSeconds(500),
        TimeInterval.milliSeconds(2), Integer.MAX_VALUE, TimeInterval.seconds(10));
}

}

Bolt Builder

public class BoltBuilder {

public Properties configs = null;

public BoltBuilder(Properties configs) {
    this.configs = configs;
}

public SinkTypeBolt buildSinkTypeBolt() {
    return new SinkTypeBolt();
}

public MongoDBBolt buildMongoDBBolt() {
    String host = configs.getProperty(Keys.MONGO_HOST);
    int port = Integer.parseInt(configs.getProperty(Keys.MONGO_PORT));
    String db = configs.getProperty(Keys.MONGO_DATABASE);
    String collection = configs.getProperty(Keys.MONGO_COLLECTION);
    return new MongoDBBolt(host, port, db, collection);
}

}

SinkTypeBolt Code

public class SinkTypeBolt extends BaseRichBolt {
private static final long serialVersionUID = 1L;
private OutputCollector collector;

public void execute(Tuple tuple) {
    // With the DefaultRecordTranslator the spout emits (topic, partition, offset, key, value),
    // so index 4 is the Kafka record value
    String value = tuple.getString(4);
    System.out.println("Received in SinkType bolt : "+value);
    if (value != null && !value.isEmpty()){
        collector.emit(Keys.MONGODB_STREAM,new Values(value));
        System.out.println("Emitted : "+value);
    }
    collector.ack(tuple);   
}

public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
}

public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declareStream(Keys.MONGODB_STREAM, new Fields("content"));
}

}

MongoDB Bolt

public class MongoDBBolt extends BaseRichBolt {
private static final long serialVersionUID = 1L;
private OutputCollector collector;
private MongoDatabase mongoDB;
private MongoClient mongoClient;
private String collection;

public String host;
public int port ;
public String db;

protected MongoDBBolt(String host, int port, String db,String collection) {
    this.host = host;
    this.port = port;
    this.db = db;
    this.collection = collection;
}

public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
    this.collector = collector;
    this.mongoClient = new MongoClient(host,port);
    this.mongoDB = mongoClient.getDatabase(db);
}

public void execute(Tuple input) {
    Document mongoDoc = getMongoDocForInput(input);
    try{
        mongoDB.getCollection(collection).insertOne(mongoDoc);
        collector.ack(input);
    }catch(Exception e) {
        e.printStackTrace();
        collector.fail(input);
    }
}

@Override
public void cleanup() {
    this.mongoClient.close();
}

public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // terminal bolt in the topology - no output streams to declare
}

public Document  getMongoDocForInput(Tuple input) {
    Document doc = new Document();
    String content = (String) input.getValueByField("content");
    String[] parts = content.trim().split(" ");
    System.out.println("Received in MongoDB bolt "+content);
    try {
        for(String part : parts) {
            String[] subParts = part.split(":");
            String fieldName = subParts[0];
            String value = subParts[1];
            doc.append(fieldName, value);
        }
    } catch(Exception e) {
        // skip malformed "field:value" segments
    }
    return doc;
}

}

pom.xml

<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.7</maven.compiler.source>
    <maven.compiler.target>1.7</maven.compiler.target>
</properties>

<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>3.8.1</version>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.apache.storm</groupId>
        <artifactId>storm-core</artifactId>
        <version>1.2.2</version>
        <scope>provided</scope>
        <exclusions>
            <exclusion>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>1.1.0</version>
        <exclusions>
            <exclusion>
                <groupId>org.slf4j</groupId>
                <artifactId>slf4j-log4j12</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>org.apache.storm</groupId>
        <artifactId>storm-kafka-client</artifactId>
        <version>1.2.2</version>
    </dependency>
    <dependency>
        <groupId>org.mongodb</groupId>
        <artifactId>mongo-java-driver</artifactId>
        <version>3.0.4</version>
    </dependency>
    <dependency>
        <groupId>com.googlecode.json-simple</groupId>
        <artifactId>json-simple</artifactId>
        <version>1.1</version>
    </dependency>
</dependencies>

<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-shade-plugin</artifactId>
            <version>1.4</version>
            <configuration>
                <createDependencyReducedPom>true</createDependencyReducedPom>
            </configuration>
            <executions>
                <execution>
                    <phase>package</phase>
                    <goals>
                        <goal>shade</goal>
                    </goals>
                    <configuration>
                        <transformers>
                            <transformer
                                implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
                            <transformer
                                implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                <mainClass>com.kafka_storm.util.Topology</mainClass>
                            </transformer>
                        </transformers>
                    </configuration>
                </execution>
            </executions>
        </plugin>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-resources-plugin</artifactId>
            <version>2.4</version>
        </plugin>
    </plugins>
    <resources>
        <resource>
            <directory>src/main/java</directory>
            <includes>
                <include>**/*.properties</include>
            </includes>
        </resource>
    </resources>
</build>

Storm UI

1 Answer:

Answer 0 (score: 1)

Just to be sure: when you submit the topology with storm jar, you do remember to use the StormSubmitter line in your topology instead of LocalCluster, right?
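
For reference, here is a minimal sketch of what the end of submitTopology() could look like for a cluster deployment - essentially re-enabling the StormSubmitter line that is currently commented out in your code and dropping the LocalCluster block:

    System.out.println("Submitting Topology");
    // Submit to the actual cluster; requires nimbus to be reachable from where you run `storm jar`
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    System.out.println("Topology submitted");

    // Keep the in-process cluster only for local testing in Eclipse:
    // LocalCluster cluster = new LocalCluster();
    // cluster.submitTopology(topologyName, conf, builder.createTopology());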

Also, check that you have started all the required daemons: at a minimum, storm nimbus and storm supervisor should be running (plus your ZooKeeper installation).
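
(If in doubt, you can start them from the Storm installation directory with bin/storm nimbus and bin/storm supervisor; the exact invocation depends on your setup.)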

The next place to look is your log files. In the Storm directory you will find a logs directory. Look at the logs/worker-artifacts/<your-topology-id>/<your-worker-port>/worker.log files. These will hopefully put you on the right track to figure out what is going on. I would open the Storm UI, find your spout and check which worker port it is running on, so you can look at the right log file.
