mirror of https://github.com/k3s-io/k3s
Fixed some typos
parent 9659ab131a
commit 718d7df276
@@ -25,7 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/fields"
 )
 
-// NewSourceAPIserver creates config source that watches for changes to the services and endpoints.
+// NewSourceAPI creates config source that watches for changes to the services and endpoints.
 func NewSourceAPI(c *client.Client, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) {
 	servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
 	endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything())
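The hunk above fixes the comment to match the function's actual name. For orientation, NewSourceAPI builds list/watch sources for services and endpoints and streams updates onto the two channels. A minimal, dependency-free sketch of that channel-feeding pattern; the ServiceUpdate shape and the polling loop here are illustrative stand-ins, not the real client/cache API:

    package main

    import (
    	"fmt"
    	"time"
    )

    // ServiceUpdate approximates the shape of updates NewSourceAPI emits
    // (illustrative only; the real type lives in the proxy config package).
    type ServiceUpdate struct {
    	Op       string // "ADD", "UPDATE", or "REMOVE"
    	Services []string
    }

    // watchServices stands in for the list/watch loop: it periodically
    // "lists" current services and sends them down the channel, the way the
    // cache reflector feeds servicesChan in NewSourceAPI.
    func watchServices(period time.Duration, out chan<- ServiceUpdate, stop <-chan struct{}) {
    	ticker := time.NewTicker(period)
    	defer ticker.Stop()
    	for {
    		select {
    		case <-ticker.C:
    			out <- ServiceUpdate{Op: "UPDATE", Services: []string{"default/kubernetes"}}
    		case <-stop:
    			return
    		}
    	}
    }

    func main() {
    	updates := make(chan ServiceUpdate)
    	stop := make(chan struct{})
    	go watchServices(100*time.Millisecond, updates, stop)
    	fmt.Println(<-updates) // the proxier's config loop sits on this channel
    	close(stop)
    }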
@@ -66,7 +66,7 @@ const iptablesMasqueradeMark = "0x4d415351"
 // ShouldUseIptablesProxier returns true if we should use the iptables Proxier
 // instead of the "classic" userspace Proxier. This is determined by checking
 // the iptables version and for the existence of kernel features. It may return
-// an error if it fails to get the itpables version without error, in which
+// an error if it fails to get the iptables version without error, in which
 // case it will also return false.
 func ShouldUseIptablesProxier() (bool, error) {
 	exec := utilexec.New()
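ShouldUseIptablesProxier gates the iptables proxier on a minimum iptables version (plus kernel-feature checks). A hedged sketch of the version comparison alone; the 1.4.11 threshold is illustrative, not necessarily the value this tree requires:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // atLeast reports whether a dotted version string like "1.4.21" is at
    // least the given minimum, comparing components numerically.
    func atLeast(version string, min []int) (bool, error) {
    	parts := strings.Split(strings.TrimSpace(version), ".")
    	for i, want := range min {
    		if i >= len(parts) {
    			return false, nil // shorter version: treat missing parts as 0
    		}
    		got, err := strconv.Atoi(parts[i])
    		if err != nil {
    			return false, fmt.Errorf("bad version %q: %v", version, err)
    		}
    		if got != want {
    			return got > want, nil
    		}
    	}
    	return true, nil
    }

    func main() {
    	ok, err := atLeast("1.4.21", []int{1, 4, 11})
    	fmt.Println(ok, err) // true <nil>: new enough for the iptables proxier
    }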
@@ -479,7 +479,7 @@ func (proxier *Proxier) syncProxyRules() {
 	existingChains := make(map[utiliptables.Chain]string)
 	iptablesSaveRaw, err := proxier.iptables.Save(utiliptables.TableNAT)
 	if err != nil { // if we failed to get any rules
-		glog.Errorf("Failed to execute iptable-save, syncing all rules. %s", err.Error())
+		glog.Errorf("Failed to execute iptables-save, syncing all rules. %s", err.Error())
 	} else { // otherwise parse the output
 		existingChains = getChainLines(utiliptables.TableNAT, iptablesSaveRaw)
 	}
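getChainLines, called in the else branch, recovers existing chain names from iptables-save output, where every line beginning with ':' declares a chain. A self-contained sketch of that parsing, assuming only the stable iptables-save text format:

    package main

    import (
    	"bufio"
    	"fmt"
    	"strings"
    )

    // parseChains maps each declared chain name to its declaration line,
    // e.g. ":KUBE-PORTALS-CONTAINER - [0:0]".
    func parseChains(save string) map[string]string {
    	chains := make(map[string]string)
    	scanner := bufio.NewScanner(strings.NewReader(save))
    	for scanner.Scan() {
    		line := scanner.Text()
    		if !strings.HasPrefix(line, ":") {
    			continue // rules, *table headers, COMMIT, comments
    		}
    		if fields := strings.Fields(line[1:]); len(fields) > 0 {
    			chains[fields[0]] = line
    		}
    	}
    	return chains
    }

    func main() {
    	save := "*nat\n:PREROUTING ACCEPT [0:0]\n:KUBE-PORTALS-CONTAINER - [0:0]\nCOMMIT\n"
    	fmt.Println(parseChains(save)) // two chains recovered
    }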
@@ -97,7 +97,7 @@ type Proxier struct {
 // assert Proxier is a ProxyProvider
 var _ proxy.ProxyProvider = &Proxier{}
 
-// A key for the portMap. The ip has to be a tring because slices can't be map
+// A key for the portMap. The ip has to be a string because slices can't be map
 // keys.
 type portMapKey struct {
 	ip string
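The comment being corrected states a real Go constraint: map keys must be comparable, net.IP is a []byte slice, and slices are not comparable, so the key stores the IP as a string. A quick demonstration (a map[net.IP]bool would not even compile):

    package main

    import (
    	"fmt"
    	"net"
    )

    // portMapKey-style key: the IP is stored as a string so the struct is
    // comparable and therefore usable as a map key.
    type portMapKey struct {
    	ip   string
    	port int
    }

    func main() {
    	seen := map[portMapKey]bool{}
    	ip := net.ParseIP("10.0.0.1") // net.IP is a []byte under the hood
    	seen[portMapKey{ip: ip.String(), port: 80}] = true
    	fmt.Println(seen[portMapKey{ip: "10.0.0.1", port: 80}]) // true
    }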
@@ -208,7 +208,7 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
 	// flush and delete chains.
 	chains := []iptables.Chain{iptablesContainerPortalChain, iptablesHostPortalChain, iptablesHostNodePortChain, iptablesContainerNodePortChain}
 	for _, c := range chains {
-		// flush chain, then if sucessful delete, delete will fail if flush fails.
+		// flush chain, then if successful delete, delete will fail if flush fails.
 		if err := ipt.FlushChain(iptables.TableNAT, c); err != nil {
 			glog.Errorf("Error flushing userspace chain: %v", err)
 			encounteredError = true
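The fixed comment describes the cleanup ordering: flush first, and only attempt the delete when the flush succeeded, since deleting a chain that still has rules fails. A sketch of that pattern with the iptables calls stubbed out (flush/del stand in for ipt.FlushChain/ipt.DeleteChain):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // flushThenDelete records each failure instead of returning early, so
    // every chain gets a cleanup attempt, mirroring CleanupLeftovers.
    func flushThenDelete(chains []string, flush, del func(string) error) (encounteredError bool) {
    	for _, c := range chains {
    		if err := flush(c); err != nil {
    			fmt.Printf("error flushing chain %s: %v\n", c, err)
    			encounteredError = true
    			continue // deleting an unflushed chain would fail anyway
    		}
    		if err := del(c); err != nil {
    			fmt.Printf("error deleting chain %s: %v\n", c, err)
    			encounteredError = true
    		}
    	}
    	return encounteredError
    }

    func main() {
    	flush := func(c string) error {
    		if c == "KUBE-NODEPORT-HOST" {
    			return errors.New("chain busy")
    		}
    		return nil
    	}
    	del := func(string) error { return nil }
    	fmt.Println(flushThenDelete([]string{"KUBE-PORTALS-CONTAINER", "KUBE-NODEPORT-HOST"}, flush, del))
    }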
@@ -338,7 +338,7 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol
 // How long we leave idle UDP connections open.
 const udpIdleTimeout = 1 * time.Second
 
-// OnUpdate manages the active set of service proxies.
+// OnServiceUpdate manages the active set of service proxies.
 // Active service proxies are reinitialized if found in the update set or
 // shutdown if missing from the update set.
 func (proxier *Proxier) OnServiceUpdate(services []api.Service) {
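Per the corrected comment, OnServiceUpdate treats the incoming slice as the complete desired state: proxies are (re)initialized for every service present and shut down for any service missing. A reconcile-loop sketch with plain strings standing in for api.Service:

    package main

    import "fmt"

    // onServiceUpdate reconciles the active proxy set against the update.
    func onServiceUpdate(active map[string]bool, services []string) {
    	desired := make(map[string]bool, len(services))
    	for _, svc := range services {
    		desired[svc] = true
    		if !active[svc] {
    			fmt.Println("starting proxy for", svc)
    			active[svc] = true
    		}
    	}
    	for svc := range active {
    		if !desired[svc] {
    			fmt.Println("stopping proxy for", svc) // missing from update set
    			delete(active, svc)
    		}
    	}
    }

    func main() {
    	active := map[string]bool{"default/old-svc": true}
    	onServiceUpdate(active, []string{"default/kubernetes"})
    }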
@@ -913,7 +913,7 @@ func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, addDstLocalMatch b
 	// If the proxy is bound (see Proxier.listenIP) to 0.0.0.0 ("any
 	// interface") we want to do the same as from-container traffic and use
 	// REDIRECT. Except that it doesn't work (empirically). REDIRECT on
-	// localpackets sends the traffic to localhost (special case, but it is
+	// local packets sends the traffic to localhost (special case, but it is
 	// documented) but the response comes from the eth0 IP (not sure why,
 	// truthfully), which makes DNS unhappy.
 	//
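The comment explains why from-host portal traffic is handled with DNAT to an explicit IP rather than REDIRECT: REDIRECT on locally generated packets replies from the wrong source address. A sketch of what DNAT-style rule arguments look like; the function name and parameters here are hypothetical, not the actual iptablesHostPortalArgs signature:

    package main

    import "fmt"

    // hostPortalArgs builds illustrative iptables arguments that DNAT
    // host-originated portal traffic to a concrete proxy IP:port instead of
    // relying on REDIRECT.
    func hostPortalArgs(destIP string, destPort int, proxyIP string, proxyPort int) []string {
    	return []string{
    		"-p", "tcp",
    		"-d", destIP + "/32",
    		"--dport", fmt.Sprintf("%d", destPort),
    		"-j", "DNAT",
    		"--to-destination", fmt.Sprintf("%s:%d", proxyIP, proxyPort),
    	}
    }

    func main() {
    	fmt.Println(hostPortalArgs("10.0.0.10", 53, "10.240.0.5", 40053))
    }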
@@ -91,7 +91,7 @@ func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType
 // This assumes that lb.lock is already held.
 func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affinityType api.ServiceAffinity, ttlMinutes int) *balancerState {
 	if ttlMinutes == 0 {
-		ttlMinutes = 180 //default to 3 hours if not specified. Should 0 be unlimeted instead????
+		ttlMinutes = 180 //default to 3 hours if not specified. Should 0 be unlimited instead????
 	}
 
 	if _, exists := lb.services[svcPort]; !exists {
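Beyond the typo, the line being fixed shows the defaulting rule: a zero TTL means "use the default" of 180 minutes. A tiny sketch of that defaulting, isolated from the load balancer state:

    package main

    import (
    	"fmt"
    	"time"
    )

    // affinityTTL applies the same defaulting as newServiceInternal: zero
    // means "unspecified", which falls back to 3 hours.
    func affinityTTL(ttlMinutes int) time.Duration {
    	if ttlMinutes == 0 {
    		ttlMinutes = 180
    	}
    	return time.Duration(ttlMinutes) * time.Minute
    }

    func main() {
    	fmt.Println(affinityTTL(0))  // 3h0m0s
    	fmt.Println(affinityTTL(30)) // 30m0s
    }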
@@ -106,7 +106,7 @@ type RestoreCountersFlag bool
 const RestoreCounters RestoreCountersFlag = true
 const NoRestoreCounters RestoreCountersFlag = false
 
-// Option flag for Restore
+// Option flag for Flush
 type FlushFlag bool
 
 const FlushTables FlushFlag = true
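The FlushFlag/RestoreCountersFlag pattern above is a small Go idiom worth noting: distinct named bool types make call sites self-documenting and let the compiler reject a flag passed in the wrong position. A sketch of that idiom (the NoFlushTables constant and restore function are illustrative):

    package main

    import "fmt"

    type FlushFlag bool

    const (
    	FlushTables   FlushFlag = true
    	NoFlushTables FlushFlag = false
    )

    // restore reads clearly at the call site: restore(FlushTables) rather
    // than an anonymous restore(true).
    func restore(flush FlushFlag) {
    	if flush == FlushTables {
    		fmt.Println("restoring with table flush")
    		return
    	}
    	fmt.Println("restoring without table flush")
    }

    func main() {
    	restore(FlushTables)
    }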
@@ -226,7 +226,7 @@ func (runner *runner) DeleteChain(table Table, chain Chain) error {
 	runner.mu.Lock()
 	defer runner.mu.Unlock()
 
-	// TODO: we could call iptable -S first, ignore the output and check for non-zero return (more like DeleteRule)
+	// TODO: we could call iptables -S first, ignore the output and check for non-zero return (more like DeleteRule)
 	out, err := runner.run(opDeleteChain, fullArgs)
 	if err != nil {
 		return fmt.Errorf("error deleting chain %q: %v: %s", chain, err, out)
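DeleteChain serializes iptables invocations behind a mutex and wraps failures with the chain name and the command output. A stubbed-out sketch of that wrapper shape (run here fakes exec'ing the iptables binary):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type runner struct {
    	mu sync.Mutex
    }

    // run stands in for shelling out to iptables; a real implementation
    // would exec the binary and capture its combined output.
    func (r *runner) run(args ...string) ([]byte, error) {
    	return nil, nil
    }

    // DeleteChain mirrors the locking and error-wrapping shape above.
    func (r *runner) DeleteChain(table, chain string) error {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	out, err := r.run("-t", table, "-X", chain)
    	if err != nil {
    		return fmt.Errorf("error deleting chain %q: %v: %s", chain, err, out)
    	}
    	return nil
    }

    func main() {
    	fmt.Println((&runner{}).DeleteChain("nat", "KUBE-PORTALS-HOST")) // <nil>
    }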