Indexing nested documents into Elasticsearch using Logstash

Date: 2020-01-30 09:48:06

Tags: elasticsearch logstash logstash-configuration logstash-jdbc

Hi everyone, I am trying to index documents from an MSSQL Server database into Elasticsearch using Logstash. I want the documents to be ingested as nested documents, but I am getting an aggregate exception error.

I have included all of my code below.

Create table department(
ID Int identity(1,1) not null,
Name varchar(100)
)

Insert into department(Name)
Select 'IT Application development'

union all

Select 'HR & Marketing'

Create table Employee(
ID Int identity(1,1) not null,
emp_Name varchar(100),
dept_Id int
)

Insert into Employee(emp_Name,dept_Id)
Select 'Mohan',1
union all
Select 'parthi',1
union all
Select 'vignesh',1

Insert into Employee(emp_Name,dept_Id)
Select 'Suresh',2
union all
Select 'Jithesh',2
union all
Select 'Venkat',2

Final SELECT statement:

SELECT
    De.id AS id, De.name AS deptname, Emp.id AS empid, Emp.emp_name AS empname
FROM department De
LEFT JOIN employee Emp ON De.id = Emp.dept_Id
ORDER BY De.id

The result should look like this:

id | deptname                   | empid | empname
---|----------------------------|-------|--------
1  | IT Application development | 1     | Mohan
1  | IT Application development | 2     | parthi
1  | IT Application development | 3     | vignesh
2  | HR & Marketing             | 4     | Suresh
2  | HR & Marketing             | 5     | Jithesh
2  | HR & Marketing             | 6     | Venkat

My Elasticsearch mapping:

PUT /departments
{
  "mappings": {
    "properties": {
      "id": {
        "type": "integer"
      },
      "deptname": {
        "type": "text"
      },
      "employee_details": {
        "type": "nested",
        "properties": {
          "empid": {
            "type": "integer"
          },
          "empname": {
            "type": "text"
          }
        }
      }
    }
  }
}
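Once documents are indexed, a nested query such as the following (a minimal sketch against the mapping above) can be used as a sanity check that employee_details is actually searchable as a nested object:

GET /departments/_search
{
  "query": {
    "nested": {
      "path": "employee_details",
      "query": {
        "match": {
          "employee_details.empname": "mohan"
        }
      }
    }
  }
}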

My Logstash config file:

input {
  jdbc {
    jdbc_driver_library => ""
    jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    jdbc_connection_string => "jdbc:sqlserver://EC2AMAZ-J90JR4A\SQLEXPRESS:1433;databaseName=xxxx;"
    jdbc_user => "xxxx"
    jdbc_password => "xxxx"
    statement => "SELECT
      De.id AS id, De.name AS deptname, Emp.id AS empid, Emp.emp_name AS empname
      FROM department De LEFT JOIN employee Emp ON De.id = Emp.dept_Id
      ORDER BY De.id"
  }
}
filter {
  aggregate {
    task_id => "%{id}"
    code => "
      map['id'] = event['id']
      map['deptname'] = event['deptname']
      map['employee_details'] ||= []
      map['employee_details'] << {'empId' => event['empid'], 'empname' => event['empname'] }
    "
    push_previous_map_as_event => true
    timeout => 5
    timeout_tags => ['aggregated']
  }
}
output {
  stdout { codec => rubydebug }
  elasticsearch {
    hosts => "https://d9bc7cbca5ec49ea96a6ea683f70caca.eastus2.azure.elastic-cloud.com:4567"
    user => "elastic"
    password => "****"
    index => "departments"
    action => "index"
    document_type => "departments"
    document_id => "%{id}"
  }
}

Error when running Logstash:

[screenshot: aggregate exception error when running Logstash]

Elasticsearch snapshot for reference:

[screenshot: current Elasticsearch index contents]

My Elasticsearch output should look like this:

{
  "took" : 398,
  "timed_out" : false,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  },
  "hits" : {
    "total" : {
      "value" : 1,
      "relation" : "eq"
    },
    "max_score" : 1.0,
    "hits" : [
      {
        "_index" : "departments",
        "_type" : "_doc",
        "_id" : "1",
        "_score" : 1.0,
        "_source" : {
          "id" : 1,
          "deptname" : "IT Application development",
          "employee_details" : [
            {
              "empid" : 1,
              "empname" : "Mohan"
            },
            {
              "empid" : 2,
              "empname" : "Parthi"
            },
            {
              "empid" : 3,
              "empname" : "Vignesh"
            }
          ]
        }
      }
    ]
  }
}

Can someone help me resolve this issue? I want every employee's empname and empid to be inserted as nested documents under the corresponding department. Thanks in advance.

2 Answers:

Answer 0 (score: 0)

Instead of the aggregate filter, I used jdbc_streaming, and it works well. It may help someone who comes across this post.

input {
  jdbc {
    jdbc_driver_library => "D:/Users/xxxx/Desktop/driver/mssql-jdbc-7.4.1.jre12-shaded.jar"
    jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    jdbc_connection_string => "jdbc:sqlserver://EC2AMAZ-J90JR4A\SQLEXPRESS:1433;databaseName=xxx;"
    jdbc_user => "xxx"
    jdbc_password => "xxxx"
    statement => "Select Policyholdername,Age,Policynumber,Dob,Client_Address,is_active from policy"
  }
}
filter {
  jdbc_streaming {
    jdbc_driver_library => "D:/Users/xxxx/Desktop/driver/mssql-jdbc-7.4.1.jre12-shaded.jar"
    jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
    jdbc_connection_string => "jdbc:sqlserver://EC2AMAZ-J90JR4A\SQLEXPRESS:1433;databaseName=xxxx;"
    jdbc_user => "xxxx"
    jdbc_password => "xxxx"
    statement => "select claimnumber,claimtype,is_active from claim where policynumber = :policynumber"
    parameters => { "policynumber" => "policynumber" }
    target => "claim_details"
  }
}
output {
  elasticsearch {
    hosts => "https://e5a4a4a4de7940d9b12674d62eac9762.eastus2.azure.elastic-cloud.com:9243"
    user => "elastic"
    password => "xxxx"
    index => "xxxx"
    action => "index"
    document_type => "_doc"
    document_id => "%{policynumber}"
  }
  stdout { codec => rubydebug }
}
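With this setup, jdbc_streaming runs the claim lookup once per policy row and stores the matching rows as an array under the claim_details field. The resulting document looks roughly like this (all values are purely illustrative; the real ones depend on your policy and claim tables):

{
  "policynumber" : "P-1001",
  "policyholdername" : "Sample Name",
  "is_active" : true,
  "claim_details" : [
    {
      "claimnumber" : "C-2001",
      "claimtype" : "health",
      "is_active" : true
    }
  ]
}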

Answer 1 (score: 0)

You can also try the aggregate filter plugin in Logstash. Check out Inserting Nested Objects using Logstash:

https://xyzcoder.github.io/2020/07/29/indexing-documents-using-logstash-and-python.html

Here I am only showing a single nested object, but we can also build an array of multiple items; see the sketch after the config below.

input {
    jdbc {
           jdbc_driver_library => "/usr/share/logstash/javalib/mssql-jdbc-8.2.2.jre11.jar"
           jdbc_driver_class => "com.microsoft.sqlserver.jdbc.SQLServerDriver"
           jdbc_connection_string => "jdbc:sqlserver://host.docker.internal;database=StackOverflow2010;user=pavan;password=pavankumar@123"
           jdbc_user => "pavan"
           jdbc_password => "pavankumar@123"
           statement => "select top 500 p.Id as PostId,p.AcceptedAnswerId,p.AnswerCount,p.Body,u.Id as userid,u.DisplayName,u.Location
                        from StackOverflow2010.dbo.Posts p inner join StackOverflow2010.dbo.Users u
                        on p.OwnerUserId=u.Id"
        }
}

filter {
    aggregate {
        task_id => "%{postid}"
        code => "
            map['postid'] = event.get('postid')
            map['accepted_answer_id'] = event.get('acceptedanswerid')
            map['answer_count'] = event.get('answercount')
            map['body'] = event.get('body')
            map['user'] = {
                'id' => event.get('userid'),
                'displayname' => event.get('displayname'),
                'location' => event.get('location')
            }
        event.cancel()"
        push_previous_map_as_event => true
        timeout => 30
    }
}

output {
    elasticsearch {
        hosts => ["http://elasticsearch:9200", "http://elasticsearch:9200"]
        index => "stackoverflow_top"
    }
    stdout {
        codec => rubydebug
    }
}
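For the array case, the aggregate code block can append one entry per row instead of assigning a single object. A minimal sketch using the department/employee fields from the question (note the event.get API here; the older event['field'] syntax raises an exception on current Logstash versions):

filter {
  aggregate {
    task_id => "%{id}"
    code => "
      map['id'] ||= event.get('id')
      map['deptname'] ||= event.get('deptname')
      map['employee_details'] ||= []
      # append one nested entry per joined employee row
      map['employee_details'] << {
        'empid' => event.get('empid'),
        'empname' => event.get('empname')
      }
      event.cancel()
    "
    push_previous_map_as_event => true
    timeout => 5
  }
}

As with any use of the aggregate filter, the rows must arrive sorted by the task_id (hence the ORDER BY in the question) and Logstash must run with a single pipeline worker (-w 1), otherwise rows can be processed out of order and split across maps.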

In that post I cover multiple ways of inserting data, such as aggregate, JDBC streaming, and other scenarios.
